This repository has been archived by the owner on Apr 18, 2024. It is now read-only.

Commit

Merge tag 'v4.19.105' into mptcp_v0.95
This is the 4.19.105 stable release
cpaasch committed Feb 20, 2020
2 parents b56d4e2 + 4fccc25 · commit e0573b7
Showing 43 changed files with 404 additions and 201 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 104
+SUBLEVEL = 105
 EXTRAVERSION =
 NAME = "People's Front"

2 changes: 1 addition & 1 deletion arch/arm/mach-npcm/Kconfig
@@ -10,7 +10,7 @@ config ARCH_NPCM7XX
 	depends on ARCH_MULTI_V7
 	select PINCTRL_NPCM7XX
 	select NPCM7XX_TIMER
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select CACHE_L2X0
 	select ARM_GIC
 	select HAVE_ARM_TWD if SMP

52 changes: 45 additions & 7 deletions arch/arm64/kernel/cpufeature.c
@@ -42,9 +42,7 @@ EXPORT_SYMBOL_GPL(elf_hwcap);
 #define COMPAT_ELF_HWCAP_DEFAULT	\
 				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
 				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
-				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
-				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
-				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
+				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
 				 COMPAT_HWCAP_LPAE)
 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
 unsigned int compat_elf_hwcap2 __read_mostly;
@@ -1341,17 +1339,30 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{},
 };
 
-#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)	\
-	{							\
-		.desc = #cap,					\
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,		\
+
+#define HWCAP_CPUID_MATCH(reg, field, s, min_value)		\
 		.matches = has_cpuid_feature,			\
 		.sys_reg = reg,					\
 		.field_pos = field,				\
 		.sign = s,					\
 		.min_field_value = min_value,			\
+
+#define __HWCAP_CAP(name, cap_type, cap)			\
+		.desc = name,					\
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,		\
 		.hwcap_type = cap_type,				\
 		.hwcap = cap,					\
+
+#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)	\
+	{							\
+		__HWCAP_CAP(#cap, cap_type, cap)		\
+		HWCAP_CPUID_MATCH(reg, field, s, min_value)	\
 	}
+
+#define HWCAP_CAP_MATCH(match, cap_type, cap)			\
+	{							\
+		__HWCAP_CAP(#cap, cap_type, cap)		\
+		.matches = match,				\
+	}
 
 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
Expand Down Expand Up @@ -1387,8 +1398,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
{},
};

#ifdef CONFIG_COMPAT
static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
{
/*
* Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
* in line with that of arm32 as in vfp_init(). We make sure that the
* check is future proof, by making sure value is non-zero.
*/
u32 mvfr1;

WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
if (scope == SCOPE_SYSTEM)
mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
else
mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);

return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
}
#endif

static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
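The macro split above is mechanical: HWCAP_CPUID_MATCH carries the CPUID-register matching fields, __HWCAP_CAP carries the hwcap bookkeeping, and the new HWCAP_CAP_MATCH pairs a hwcap with an arbitrary predicate instead of a single register field, which is what compat_has_neon needs since NEON readiness spans three MVFR1 fields. Expanding the new NEON entry by hand (stringification included) gives an initializer equivalent to:

	/* HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON) */
	{
		.desc = "COMPAT_HWCAP_NEON",
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.hwcap_type = CAP_COMPAT_HWCAP,
		.hwcap = COMPAT_HWCAP_NEON,
		.matches = compat_has_neon,
	},
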
20 changes: 18 additions & 2 deletions arch/arm64/kernel/fpsimd.c
@@ -218,6 +218,7 @@ static void sve_free(struct task_struct *task)
 static void task_fpsimd_load(void)
 {
 	WARN_ON(!in_softirq() && !irqs_disabled());
+	WARN_ON(!system_supports_fpsimd());
 
 	if (system_supports_sve() && test_thread_flag(TIF_SVE))
 		sve_load_state(sve_pffr(&current->thread),
@@ -238,6 +239,7 @@ void fpsimd_save(void)
 	struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
 	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
 
+	WARN_ON(!system_supports_fpsimd());
 	WARN_ON(!in_softirq() && !irqs_disabled());
 
 	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -977,6 +979,7 @@ void fpsimd_bind_task_to_cpu(void)
 	struct fpsimd_last_state_struct *last =
 		this_cpu_ptr(&fpsimd_last_state);
 
+	WARN_ON(!system_supports_fpsimd());
 	last->st = &current->thread.uw.fpsimd_state;
 	current->thread.fpsimd_cpu = smp_processor_id();
 
@@ -996,6 +999,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
 	struct fpsimd_last_state_struct *last =
 		this_cpu_ptr(&fpsimd_last_state);
 
+	WARN_ON(!system_supports_fpsimd());
 	WARN_ON(!in_softirq() && !irqs_disabled());
 
 	last->st = st;
@@ -1008,8 +1012,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
  */
 void fpsimd_restore_current_state(void)
 {
-	if (!system_supports_fpsimd())
+	/*
+	 * For the tasks that were created before we detected the absence of
+	 * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
+	 * e.g, init. This could be then inherited by the children processes.
+	 * If we later detect that the system doesn't support FP/SIMD,
+	 * we must clear the flag for all the tasks to indicate that the
+	 * FPSTATE is clean (as we can't have one) to avoid looping for ever in
+	 * do_notify_resume().
+	 */
+	if (!system_supports_fpsimd()) {
+		clear_thread_flag(TIF_FOREIGN_FPSTATE);
 		return;
+	}
 
 	local_bh_disable();

@@ -1028,7 +1043,7 @@ void fpsimd_restore_current_state(void)
  */
 void fpsimd_update_current_state(struct user_fpsimd_state const *state)
 {
-	if (!system_supports_fpsimd())
+	if (WARN_ON(!system_supports_fpsimd()))
 		return;
 
 	local_bh_disable();
@@ -1055,6 +1070,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
 
 void fpsimd_flush_cpu_state(void)
 {
+	WARN_ON(!system_supports_fpsimd());
 	__this_cpu_write(fpsimd_last_state.st, NULL);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
 }
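The "looping for ever in do_notify_resume()" in the comment refers to the arm64 ret-to-user work loop, which re-runs pending work while any flag in _TIF_WORK_MASK is set. A minimal sketch of the hazard (not the actual signal.c code; flag and helper names as used elsewhere in the arm64 tree):

	unsigned long thread_flags = READ_ONCE(current_thread_info()->flags);

	while (thread_flags & _TIF_WORK_MASK) {
		if (thread_flags & _TIF_FOREIGN_FPSTATE)
			/*
			 * Before this fix, on a system without FP/SIMD this
			 * returned without touching the flag, so the loop
			 * condition never became false.
			 */
			fpsimd_restore_current_state();

		thread_flags = READ_ONCE(current_thread_info()->flags);
	}
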
7 changes: 7 additions & 0 deletions arch/arm64/kernel/process.c
@@ -414,6 +414,13 @@ static void ssbs_thread_switch(struct task_struct *next)
 	if (unlikely(next->flags & PF_KTHREAD))
 		return;
 
+	/*
+	 * If all CPUs implement the SSBS extension, then we just need to
+	 * context-switch the PSTATE field.
+	 */
+	if (cpu_have_feature(cpu_feature(SSBS)))
+		return;
+
 	/* If the mitigation is enabled, then we leave SSBS clear. */
 	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
 	    test_tsk_thread_flag(next, TIF_SSBD))
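cpu_have_feature(cpu_feature(SSBS)) asks whether SSBS is advertised as a system-wide ELF hwcap, i.e. whether every CPU implements it; only then does the exception-return path preserve PSTATE.SSBS on its own, making the manual fixup below unnecessary. The helpers are roughly the following (abridged from the arm64 headers of this era; a sketch, not a quote):

	#define cpu_feature(x)	ilog2(HWCAP_ ## x)

	static inline bool cpu_have_feature(unsigned int num)
	{
		/* elf_hwcap bits are only set for features present on all CPUs */
		return elf_hwcap & (1UL << num);
	}
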
10 changes: 9 additions & 1 deletion arch/arm64/kvm/hyp/switch.c
@@ -37,7 +37,15 @@
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+	/*
+	 * When the system doesn't support FP/SIMD, we cannot rely on
+	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
+	 * abort on the very first access to FP and thus we should never
+	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
+	 * trap the accesses.
+	 */
+	if (!system_supports_fpsimd() ||
+	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
 		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
 				      KVM_ARM64_FP_HOST);
 
2 changes: 1 addition & 1 deletion arch/s390/include/asm/timex.h
@@ -155,7 +155,7 @@ static inline void get_tod_clock_ext(char *clk)
 
 static inline unsigned long long get_tod_clock(void)
 {
-	unsigned char clk[STORE_CLOCK_EXT_SIZE];
+	char clk[STORE_CLOCK_EXT_SIZE];
 
 	get_tod_clock_ext(clk);
 	return *((unsigned long long *)&clk[1]);
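The one-character change matters because get_tod_clock_ext() takes a plain char *, and passing an unsigned char array where char * is expected is a pointer-signedness mismatch that compilers warn about. A self-contained illustration of the warning class (hypothetical names):

	void takes_char(char *p);

	void caller(void)
	{
		unsigned char buf[16];

		takes_char(buf);		/* -Wpointer-sign: pointer targets differ in signedness */
		takes_char((char *)buf);	/* a cast silences it; matching the declared type is cleaner */
	}
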
1 change: 1 addition & 0 deletions arch/x86/events/amd/core.c
@@ -245,6 +245,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
 	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,
2 changes: 2 additions & 0 deletions arch/x86/events/intel/ds.c
@@ -1402,6 +1402,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
 	old = ((s64)(prev_raw_count << shift) >> shift);
 	local64_add(new - old + count * period, &event->count);
 
+	local64_set(&hwc->period_left, -new);
+
 	perf_event_update_userpage(event);
 
 	return 0;
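For auto-reload (PEBS) events the counter counts upward from -period toward overflow, and intel_pmu_save_and_restart_reload() folds the reloads into event->count. The added line keeps hwc->period_left in step with the raw counter value. A worked example with illustrative numbers:

	/*
	 * period = 100000, so the counter is reloaded to -100000 after each
	 * overflow. Suppose the latest raw read, sign-extended, is
	 * new = -37500: 62500 increments have happened since the last reload
	 * and 37500 remain until the next overflow, hence
	 *
	 *	local64_set(&hwc->period_left, -new);	// period_left = 37500
	 */
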
2 changes: 1 addition & 1 deletion arch/x86/kvm/paging_tmpl.h
@@ -36,7 +36,7 @@
 #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
 #define PT_HAVE_ACCESSED_DIRTY(mmu) true
 #ifdef CONFIG_X86_64
-#define PT_MAX_FULL_LEVELS 4
+#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
 #define CMPXCHG cmpxchg
 #else
 #define CMPXCHG cmpxchg64
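PT64_ROOT_MAX_LEVEL is 5 once 5-level paging support exists, and PT_MAX_FULL_LEVELS sizes the arrays the guest-page-table walker fills in, so a hard-coded 4 under-sizes them for a 5-level walk. Abridged sketch of the dependent structure (field list assumed from the same file):

	struct guest_walker {
		int level;
		unsigned max_level;
		gfn_t table_gfn[PT_MAX_FULL_LEVELS];
		pt_element_t ptes[PT_MAX_FULL_LEVELS];
		gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
		/* further fields omitted */
	};
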
3 changes: 3 additions & 0 deletions arch/x86/kvm/vmx/vmx.c
@@ -2968,6 +2968,9 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 static int get_ept_level(struct kvm_vcpu *vcpu)
 {
+	/* Nested EPT currently only supports 4-level walks. */
+	if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
+		return 4;
 	if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
 		return 5;
 	return 4;
4 changes: 2 additions & 2 deletions drivers/hwmon/pmbus/ltc2978.c
@@ -89,8 +89,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
 
 #define LTC_POLL_TIMEOUT		100	/* in milli-seconds */
 
-#define LTC_NOT_BUSY			BIT(5)
-#define LTC_NOT_PENDING			BIT(4)
+#define LTC_NOT_BUSY			BIT(6)
+#define LTC_NOT_PENDING			BIT(5)
 
 /*
  * LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
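BIT(n) is (1UL << n), so the two MFR_COMMON status bits the driver polls move from masks 0x20/0x10 to 0x40/0x20; per the LTC297x datasheets, "chip not busy" is bit 6 and "output not in transition" is bit 5 (bit naming paraphrased; verify against the datasheet). A hypothetical poll condition showing how such masks are used:

	u8 ready = LTC_NOT_BUSY | LTC_NOT_PENDING;	/* 0x40 | 0x20 = 0x60 */

	if ((mfr_common_status & ready) == ready)
		/* safe to issue the next command */;
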
24 changes: 9 additions & 15 deletions drivers/infiniband/core/security.c
@@ -336,22 +336,16 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
 	if (!new_pps)
 		return NULL;
 
-	if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
-		if (!qp_pps) {
-			new_pps->main.port_num = qp_attr->port_num;
-			new_pps->main.pkey_index = qp_attr->pkey_index;
-		} else {
-			new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
-				qp_attr->port_num :
-				qp_pps->main.port_num;
-
-			new_pps->main.pkey_index =
-				(qp_attr_mask & IB_QP_PKEY_INDEX) ?
-				qp_attr->pkey_index :
-				qp_pps->main.pkey_index;
-		}
+	if (qp_attr_mask & IB_QP_PORT)
+		new_pps->main.port_num =
+			(qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
+	if (qp_attr_mask & IB_QP_PKEY_INDEX)
+		new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
+						      qp_attr->pkey_index;
+	if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
 		new_pps->main.state = IB_PORT_PKEY_VALID;
-	} else if (qp_pps) {
+
+	if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
 		new_pps->main.port_num = qp_pps->main.port_num;
 		new_pps->main.pkey_index = qp_pps->main.pkey_index;
 		if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
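One subtlety in the rewritten guard: qp_attr_mask is a bitmask, so the "neither flag was set" test must build the mask it checks with bitwise OR. With logical ||, the parenthesized expression collapses to 1 and only bit 0 gets tested. A minimal sketch (constants mirror ib_verbs.h, but treat the values as illustrative):

	#define IB_QP_PKEY_INDEX	(1 << 4)
	#define IB_QP_PORT		(1 << 5)

	int qp_attr_mask = IB_QP_PORT;

	int touched = qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT);	/* 0x20: correct */
	int broken  = qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT);	/* 0x20 & 1 == 0: wrong */
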
15 changes: 7 additions & 8 deletions drivers/infiniband/core/uverbs_cmd.c
@@ -2914,12 +2914,6 @@ static int kern_spec_to_ib_spec_action(struct ib_uverbs_file *ufile,
 	return 0;
 }
 
-static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
-{
-	/* Returns user space filter size, includes padding */
-	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
-}
-
 static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
 				u16 ib_real_filter_sz)
 {
@@ -3063,11 +3057,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
 static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
 					union ib_flow_spec *ib_spec)
 {
-	ssize_t kern_filter_sz;
+	size_t kern_filter_sz;
 	void *kern_spec_mask;
 	void *kern_spec_val;
 
-	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
+	if (check_sub_overflow((size_t)kern_spec->hdr.size,
+			       sizeof(struct ib_uverbs_flow_spec_hdr),
+			       &kern_filter_sz))
+		return -EINVAL;
+
+	kern_filter_sz /= 2;
 
 	kern_spec_val = (void *)kern_spec +
 		sizeof(struct ib_uverbs_flow_spec_hdr);
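The replacement bounds-checks the user-controlled hdr.size before halving it: the removed helper computed (spec->size - sizeof(hdr)) / 2 directly, which wraps around for an undersized spec->size. check_sub_overflow(), from <linux/overflow.h>, stores a - b in the destination and returns true if the subtraction wrapped. A userspace sketch of the same pattern built on the underlying GCC/Clang builtin (sizes and names are stand-ins):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	#define HDR_SZ ((size_t)16)	/* stand-in for sizeof(struct ib_uverbs_flow_spec_hdr) */

	/* Filter size is (hdr.size - header) / 2: half value buffer, half mask. */
	static bool filter_size(size_t hdr_size, size_t *out)
	{
		size_t diff;

		if (__builtin_sub_overflow(hdr_size, HDR_SZ, &diff))
			return false;	/* would wrap: reject, like the -EINVAL above */
		*out = diff / 2;
		return true;
	}

	int main(void)
	{
		size_t sz = 0;

		printf("%d\n", filter_size(8, &sz));	/* 0: 8 - 16 would wrap */
		printf("%d\n", filter_size(40, &sz));	/* 1: sz == 12 */
		return 0;
	}
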
2 changes: 2 additions & 0 deletions drivers/infiniband/hw/hfi1/affinity.c
@@ -478,6 +478,8 @@ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
 			  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
 	}
 
+	free_cpumask_var(available_cpus);
+	free_cpumask_var(non_intr_cpus);
 	return 0;
 
 fail:
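The two added lines plug a leak on the success path: the function allocates its cpumasks with zalloc_cpumask_var() but previously released them only on the fail: path. The general pairing, as a sketch rather than the driver's exact code:

	cpumask_var_t non_intr_cpus, available_cpus;

	if (!zalloc_cpumask_var(&non_intr_cpus, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&available_cpus, GFP_KERNEL)) {
		free_cpumask_var(non_intr_cpus);
		return -ENOMEM;
	}

	/* ... build the per-device completion-vector mapping ... */

	free_cpumask_var(available_cpus);	/* now released on success too */
	free_cpumask_var(non_intr_cpus);
	return 0;
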
… (28 of the 43 changed files are not shown here)
