Merge pull request torvalds#236 from zandrey/5.4.x+fslc
Update 5.4.x+fslc to v5.4.93
otavio authored Jan 29, 2021
2 parents ac16865 + c02d1bb commit 3e79f35
Showing 104 changed files with 488 additions and 222 deletions.
4 changes: 4 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -5452,6 +5452,10 @@
This option is obsoleted by the "nopv" option, which
has equivalent effect for XEN platform.

xen_no_vector_callback
[KNL,X86,XEN] Disable the vector callback for Xen
event channel interrupts.

xen_scrub_pages= [XEN]
Boolean option to control scrubbing pages before giving them back
to Xen, for use by other domains. Can be also changed at runtime
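As an aside for readers of this change, a hypothetical boot entry (kernel image, root device and console arguments invented purely for illustration) would enable the new behaviour simply by appending the flag to the guest's command line:

    vmlinuz-5.4.93 root=/dev/xvda1 ro console=hvc0 xen_no_vector_callback

The option takes no value; its presence alone disables the vector callback for Xen event channel interrupts, as wired up in arch/x86/xen/enlighten_hvm.c later in this commit.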
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 92
SUBLEVEL = 93
EXTRAVERSION =
NAME = Kleptomaniac Octopus

2 changes: 1 addition & 1 deletion arch/arm/xen/enlighten.c
@@ -371,7 +371,7 @@ static int __init xen_guest_init(void)
}
gnttab_init();
if (!xen_initial_domain())
xenbus_probe(NULL);
xenbus_probe();

/*
* Making sure board specific code will not set up ops for
10 changes: 5 additions & 5 deletions arch/arm64/include/asm/atomic.h
@@ -17,7 +17,7 @@
#include <asm/lse.h>

#define ATOMIC_OP(op) \
static inline void arch_##op(int i, atomic_t *v) \
static __always_inline void arch_##op(int i, atomic_t *v) \
{ \
__lse_ll_sc_body(op, i, v); \
}
@@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
#undef ATOMIC_OP

#define ATOMIC_FETCH_OP(name, op) \
static inline int arch_##op##name(int i, atomic_t *v) \
static __always_inline int arch_##op##name(int i, atomic_t *v) \
{ \
return __lse_ll_sc_body(op##name, i, v); \
}
@@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
#undef ATOMIC_FETCH_OPS

#define ATOMIC64_OP(op) \
static inline void arch_##op(long i, atomic64_t *v) \
static __always_inline void arch_##op(long i, atomic64_t *v) \
{ \
__lse_ll_sc_body(op, i, v); \
}
@@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, op) \
static inline long arch_##op##name(long i, atomic64_t *v) \
static __always_inline long arch_##op##name(long i, atomic64_t *v) \
{ \
return __lse_ll_sc_body(op##name, i, v); \
}
@@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
return __lse_ll_sc_body(atomic64_dec_if_positive, v);
}
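For readers less familiar with these wrapper macros, here is a sketch (an illustrative expansion, not the literal preprocessor output) of what ATOMIC_OP(atomic_add) generates after this hunk; the only functional change is that the wrapper is now forced inline with __always_inline instead of being left to the compiler's inlining heuristics:

    static __always_inline void arch_atomic_add(int i, atomic_t *v)
    {
            /* Dispatch to the LSE or LL/SC implementation of atomic_add. */
            __lse_ll_sc_body(atomic_add, i, v);
    }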
25 changes: 8 additions & 17 deletions arch/powerpc/kernel/vmlinux.lds.S
@@ -210,6 +210,12 @@ SECTIONS
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
_sinittext = .;
INIT_TEXT

/*
*.init.text might be RO so we must ensure this section ends on
* a page boundary.
*/
. = ALIGN(PAGE_SIZE);
_einittext = .;
#ifdef CONFIG_PPC64
*(.tramp.ftrace.init);
@@ -223,21 +229,9 @@ SECTIONS
EXIT_TEXT
}

.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
INIT_DATA
}

.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
INIT_SETUP(16)
}
. = ALIGN(PAGE_SIZE);

.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
INIT_CALLS
}

.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
CON_INITCALL
}
INIT_DATA_SECTION(16)

. = ALIGN(8);
__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
@@ -265,9 +259,6 @@ SECTIONS
__stop___fw_ftr_fixup = .;
}
#endif
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
INIT_RAM_FS
}

PERCPU_SECTION(L1_CACHE_BYTES)

1 change: 1 addition & 0 deletions arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -83,6 +83,7 @@
phy-mode = "gmii";
phy-handle = <&phy0>;
phy0: ethernet-phy@0 {
compatible = "ethernet-phy-id0007.0771";
reg = <0>;
};
};
2 changes: 2 additions & 0 deletions arch/riscv/configs/defconfig
@@ -62,6 +62,8 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_SPI=y
CONFIG_SPI_SIFIVE=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
3 changes: 3 additions & 0 deletions arch/riscv/kernel/time.c
@@ -4,6 +4,7 @@
* Copyright (C) 2017 SiFive
*/

#include <linux/of_clk.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <asm/sbi.h>
@@ -24,5 +25,7 @@ void __init time_init(void)
riscv_timebase = prop;

lpj_fine = riscv_timebase / HZ;

of_clk_init(NULL);
timer_probe();
}
3 changes: 1 addition & 2 deletions arch/sh/drivers/dma/Kconfig
@@ -63,8 +63,7 @@ config PVR2_DMA

config G2_DMA
tristate "G2 Bus DMA support"
depends on SH_DREAMCAST
select SH_DMA_API
depends on SH_DREAMCAST && SH_DMA_API
help
This enables support for the DMA controller for the Dreamcast's
G2 bus. Drivers that want this will generally enable this on
15 changes: 13 additions & 2 deletions arch/x86/include/asm/fpu/api.h
@@ -16,14 +16,25 @@
* Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
* disables preemption so be careful if you intend to use it for long periods
* of time.
* If you intend to use the FPU in softirq you need to check first with
* If you intend to use the FPU in irq/softirq you need to check first with
* irq_fpu_usable() if it is possible.
*/
extern void kernel_fpu_begin(void);

/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
#define KFPU_387 _BITUL(0) /* 387 state will be initialized */
#define KFPU_MXCSR _BITUL(1) /* MXCSR will be initialized */

extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
extern void fpregs_mark_activate(void);

/* Code that is unaware of kernel_fpu_begin_mask() can use this */
static inline void kernel_fpu_begin(void)
{
kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
}

/*
* Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
* A context switch will (and softirq might) save CPU's FPU registers to
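A minimal usage sketch of the new interface (the helper function below is hypothetical; kernel_fpu_begin_mask() and the KFPU_* flags come from the hunk above): a caller that only touches x87/MMX state can skip the MXCSR initialisation, while code unaware of the mask keeps the old behaviour through the kernel_fpu_begin() wrapper.

    static void mmx_only_work(void)
    {
            kernel_fpu_begin_mask(KFPU_387);   /* initialise 387/FCW state only */
            /* ... integer MMX instructions; no SSE, so MXCSR is never read ... */
            kernel_fpu_end();
    }

This is exactly the pattern the arch/x86/lib/mmx_32.c hunks below switch to.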
4 changes: 2 additions & 2 deletions arch/x86/include/asm/topology.h
@@ -110,6 +110,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
#define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)

extern unsigned int __max_die_per_package;

#ifdef CONFIG_SMP
#define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
@@ -118,8 +120,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
extern unsigned int __max_logical_packages;
#define topology_max_packages() (__max_logical_packages)

extern unsigned int __max_die_per_package;

static inline int topology_max_die_per_package(void)
{
return __max_die_per_package;
4 changes: 2 additions & 2 deletions arch/x86/kernel/cpu/amd.c
@@ -545,12 +545,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
u32 ecx;

ecx = cpuid_ecx(0x8000001e);
nodes_per_socket = ((ecx >> 8) & 7) + 1;
__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
u64 value;

rdmsrl(MSR_FAM10H_NODE_ID, value);
nodes_per_socket = ((value >> 3) & 7) + 1;
__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
}

if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/topology.c
@@ -25,10 +25,10 @@
#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)

#ifdef CONFIG_SMP
unsigned int __max_die_per_package __read_mostly = 1;
EXPORT_SYMBOL(__max_die_per_package);

#ifdef CONFIG_SMP
/*
* Check if given CPUID extended toplogy "leaf" is implemented
*/
9 changes: 5 additions & 4 deletions arch/x86/kernel/fpu/core.c
@@ -82,7 +82,7 @@ bool irq_fpu_usable(void)
}
EXPORT_SYMBOL(irq_fpu_usable);

void kernel_fpu_begin(void)
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
preempt_disable();

@@ -102,13 +102,14 @@ void kernel_fpu_begin(void)
}
__cpu_invalidate_fpregs_state();

if (boot_cpu_has(X86_FEATURE_XMM))
/* Put sane initial values into the control registers. */
if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
ldmxcsr(MXCSR_DEFAULT);

if (boot_cpu_has(X86_FEATURE_FPU))
if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);

void kernel_fpu_end(void)
{
20 changes: 15 additions & 5 deletions arch/x86/lib/mmx_32.c
@@ -26,6 +26,16 @@
#include <asm/fpu/api.h>
#include <asm/asm.h>

/*
* Use KFPU_387. MMX instructions are not affected by MXCSR,
* but both AMD and Intel documentation states that even integer MMX
* operations will result in #MF if an exception is pending in FCW.
*
* EMMS is not needed afterwards because, after calling kernel_fpu_end(),
* any subsequent user of the 387 stack will reinitialize it using
* KFPU_387.
*/

void *_mmx_memcpy(void *to, const void *from, size_t len)
{
void *p;
@@ -37,7 +47,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
p = to;
i = len >> 6; /* len/64 */

kernel_fpu_begin();
kernel_fpu_begin_mask(KFPU_387);

__asm__ __volatile__ (
"1: prefetch (%0)\n" /* This set is 28 bytes */
@@ -127,7 +137,7 @@ static void fast_clear_page(void *page)
{
int i;

kernel_fpu_begin();
kernel_fpu_begin_mask(KFPU_387);

__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :
@@ -160,7 +170,7 @@ static void fast_copy_page(void *to, void *from)
{
int i;

kernel_fpu_begin();
kernel_fpu_begin_mask(KFPU_387);

/*
* maybe the prefetch stuff can go before the expensive fnsave...
@@ -247,7 +257,7 @@ static void fast_clear_page(void *page)
{
int i;

kernel_fpu_begin();
kernel_fpu_begin_mask(KFPU_387);

__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :
@@ -282,7 +292,7 @@ static void fast_copy_page(void *to, void *from)
{
int i;

kernel_fpu_begin();
kernel_fpu_begin_mask(KFPU_387);

__asm__ __volatile__ (
"1: prefetch (%0)\n"
11 changes: 10 additions & 1 deletion arch/x86/xen/enlighten_hvm.c
@@ -175,6 +175,8 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
return 0;
}

static bool no_vector_callback __initdata;

static void __init xen_hvm_guest_init(void)
{
if (xen_pv_domain())
@@ -194,7 +196,7 @@ static void __init xen_hvm_guest_init(void)

xen_panic_handler_init();

if (xen_feature(XENFEAT_hvm_callback_vector))
if (!no_vector_callback && xen_feature(XENFEAT_hvm_callback_vector))
xen_have_vector_callback = 1;

xen_hvm_smp_init();
@@ -220,6 +222,13 @@ static __init int xen_parse_nopv(char *arg)
}
early_param("xen_nopv", xen_parse_nopv);

static __init int xen_parse_no_vector_callback(char *arg)
{
no_vector_callback = true;
return 0;
}
early_param("xen_no_vector_callback", xen_parse_no_vector_callback);

bool __init xen_hvm_need_lapic(void)
{
if (xen_pv_domain())
2 changes: 2 additions & 0 deletions drivers/acpi/scan.c
@@ -587,6 +587,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
if (!device)
return -EINVAL;

*device = NULL;

status = acpi_get_data_full(handle, acpi_scan_drop_device,
(void **)device, callback);
if (ACPI_FAILURE(status) || !*device) {
17 changes: 16 additions & 1 deletion drivers/base/core.c
@@ -106,6 +106,16 @@ int device_links_read_lock_held(void)
#endif
#endif /* !CONFIG_SRCU */

static bool device_is_ancestor(struct device *dev, struct device *target)
{
while (target->parent) {
target = target->parent;
if (dev == target)
return true;
}
return false;
}

/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
@@ -119,7 +129,12 @@ static int device_is_dependent(struct device *dev, void *target)
struct device_link *link;
int ret;

if (dev == target)
/*
* The "ancestors" check is needed to catch the case when the target
* device has not been completely initialized yet and it is still
* missing from the list of children of its parent device.
*/
if (dev == target || device_is_ancestor(dev, target))
return 1;

ret = device_for_each_child(dev, target, device_is_dependent);
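To make the intent of the new helper clearer, here is a stripped-down model (simplified struct and names, not the kernel code itself) of the ancestor walk: a device that is still being registered already has its ->parent pointer set even though it may not yet appear on that parent's list of children, so walking upwards from the target catches a dependency that the device_for_each_child() walk alone would miss.

    struct dev {
            struct dev *parent;
    };

    /* Returns 1 when 'dev' is an ancestor of 'target'. */
    static int is_ancestor(const struct dev *dev, const struct dev *target)
    {
            while (target->parent) {
                    target = target->parent;
                    if (dev == target)
                            return 1;
            }
            return 0;
    }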