Commit 90f7f49
Changes representative of linux-4.18.0-240.22.1.el8_3.tar.xz
da-x committed Mar 25, 2021
1 parent 1074e81 commit 90f7f49
Showing 81 changed files with 1,109 additions and 642 deletions.
41 changes: 41 additions & 0 deletions .gitlab-ci.yml
@@ -0,0 +1,41 @@
# This CI will only work for trusted contributors. CI for public contributors
# runs via a webhook on the merge requests. There's nothing you have to do if
# you want your changes tested -- created pipelines will be automatically
# linked in the merge request and appropriate labels will be added to it.
# Changes to this file will NOT be reflected in the testing.

workflow:
  rules:
    - if: $CI_MERGE_REQUEST_ID

trigger_pipeline:
  variables:
    git_url: ${CI_MERGE_REQUEST_PROJECT_URL}
    branch: ${CI_MERGE_REQUEST_TARGET_BRANCH_NAME}
    commit_hash: ${CI_COMMIT_SHA}
    name: kernel-rhel8
    mr_id: ${CI_MERGE_REQUEST_IID}
    mr_url: ${CI_MERGE_REQUEST_PROJECT_URL}/-/merge_requests/${CI_MERGE_REQUEST_IID}
    title: ${CI_COMMIT_TITLE}

    send_ready_for_test_pre: 'True'
    send_ready_for_test_post: 'True'
    kernel_type: internal
    make_target: rpm
    builder_image: quay.io/cki/builder-rhel8
    architectures: 'x86_64 ppc64le aarch64 s390x'
    build_kabi_whitelist: 'true'
    tree_yaml_name: rhel
    kpet_tree_family: rhel8
    package_name: kernel
    publish_elsewhere: 'true'
    native_tools: 'true'
    # Skip testing for now to save resources. Uncomment ystream_distro and remove
    # the skip_beaker line when ready for production.
    #skip_beaker: 'true'
    disttag_override: '.el8_3'
  trigger:
    project: redhat/red-hat-ci-tools/kernel/cki-internal-pipelines/cki-internal-contributors
    branch: rhel8
    strategy: depend

3 changes: 3 additions & 0 deletions Documentation/virt/kvm/api.rst
@@ -1260,6 +1260,9 @@ field userspace_addr, which must point at user addressable memory for
the entire memory slot size. Any object may back this memory, including
anonymous memory, ordinary files, and hugetlbfs.

On architectures that support a form of address tagging, userspace_addr must
be an untagged address.

It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
be identical. This allows large pages in the guest to be backed by large
pages in the host.
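
Editor's note: a minimal userspace sketch tying the two rules above together -- an untagged, 2 MiB-aligned mapping backing a 2 MiB-aligned guest_phys_addr. This is an illustration only (error handling trimmed; MAP_HUGETLB assumes hugepages are configured on the host):

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	size_t len = 2UL << 20;			/* one 2 MiB large page */

	/* A hugetlb mapping is 2 MiB-aligned and untagged, so the low
	 * 21 bits of userspace_addr are zero -- identical to those of
	 * the guest_phys_addr chosen below. */
	void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (kvm < 0 || vm < 0 || mem == MAP_FAILED)
		return 1;

	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = 2UL << 20,	/* low 21 bits: zero */
		.memory_size = len,
		.userspace_addr = (unsigned long)mem,
	};
	if (ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region) < 0)
		perror("KVM_SET_USER_MEMORY_REGION");
	return 0;
}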
2 changes: 1 addition & 1 deletion Makefile.rhelver
@@ -12,7 +12,7 @@ RHEL_MINOR = 3
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
-RHEL_RELEASE = 240.15.1
+RHEL_RELEASE = 240.22.1

#
# Early y+1 numbering
6 changes: 3 additions & 3 deletions arch/ia64/mm/init.c
@@ -498,7 +498,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
-				 MEMMAP_EARLY, NULL);
+				 MEMINIT_EARLY, NULL);
	return 0;
}

@@ -507,8 +507,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
		 unsigned long start_pfn)
{
	if (!vmem_map) {
-		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
-				 NULL);
+		memmap_init_zone(size, nid, zone, start_pfn,
+				 MEMINIT_EARLY, NULL);
	} else {
		struct page *start;
		struct memmap_init_callback_data args;
111 changes: 110 additions & 1 deletion arch/s390/crypto/arch_random.c
@@ -2,7 +2,7 @@
/*
 * s390 arch random implementation.
 *
- * Copyright IBM Corp. 2017, 2018
+ * Copyright IBM Corp. 2017, 2020
 * Author(s): Harald Freudenberger
 *
 * The s390_arch_random_generate() function may be called from random.c
@@ -33,9 +33,11 @@
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <linux/moduleparam.h>
#include <asm/cpacf.h>

DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
EXPORT_SYMBOL(s390_arch_random_available);

atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
EXPORT_SYMBOL(s390_arch_random_counter);
@@ -99,6 +101,113 @@ static void arch_rng_refill_buffer(struct work_struct *unused)
	queue_delayed_work(system_long_wq, &arch_rng_work, delay);
}

/*
 * Here follows the implementation of s390_arch_get_random_long().
 *
 * The random longs to be pulled by arch_get_random_long() are
 * prepared in a 4K buffer which is filled from the NIST 800-90
 * compliant s390 drbg. By default the random long buffer is refilled
 * 256 times before the drbg itself needs a reseed. The reseed of the
 * drbg is done with 64 bytes fetched from the high quality (but slow)
 * trng, which is assumed to deliver 100% entropy. So the 64 * 8 = 512
 * bits of entropy are spread over 256 * 4KB = 1MB, serving 131072
 * arch_get_random_long() invocations before a reseed is needed.
 *
 * How often the 4K random long buffer is refilled with the drbg
 * before the drbg is reseeded can be adjusted. There is a module
 * parameter 'rndlong_drbg_reseed' accessible via
 *   /sys/module/arch_random/parameters/rndlong_drbg_reseed
 * or as kernel command line parameter
 *   arch_random.rndlong_drbg_reseed=<value>
 * It tells how often the drbg fills the 4K buffer before it is
 * re-seeded with fresh entropy from the trng.
 * A value of 16 reseeds the drbg every 16 * 4 KB = 64 KB with 64
 * bytes of fresh entropy pulled from the trng, i.e. 512 bits of
 * entropy per 64 KB.
 * A value of 256 produces 1 MB of drbg output before a reseed is
 * done, spreading the 512 bits of entropy over 1 MB.
 * A value of 0 forces a reseed every time the 4K buffer is depleted,
 * raising the rate to 512 bits of entropy per 4K, or 1 bit of entropy
 * per arch_get_random_long() invocation.
 * A negative value disables the mechanism entirely:
 * arch_get_random_long() then returns false, indicating that the
 * feature is unavailable.
 */

static unsigned long rndlong_buf[512];
static DEFINE_SPINLOCK(rndlong_lock);
static int rndlong_buf_index;

static int rndlong_drbg_reseed = 256;
module_param_named(rndlong_drbg_reseed, rndlong_drbg_reseed, int, 0600);
MODULE_PARM_DESC(rndlong_drbg_reseed, "s390 arch_get_random_long() drbg reseed");

static inline void refill_rndlong_buf(void)
{
	static u8 prng_ws[240];
	static int drbg_counter;

	if (--drbg_counter < 0) {
		/* need to re-seed the drbg */
		u8 seed[64];

		/* fetch seed from trng */
		cpacf_trng(NULL, 0, seed, sizeof(seed));
		/* seed drbg */
		memset(prng_ws, 0, sizeof(prng_ws));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
			   &prng_ws, NULL, 0, seed, sizeof(seed));
		/* re-init counter for drbg */
		drbg_counter = rndlong_drbg_reseed;
	}

	/* fill the arch_get_random_long buffer from drbg */
	cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prng_ws,
		   (u8 *) rndlong_buf, sizeof(rndlong_buf),
		   NULL, 0);
}

bool s390_arch_get_random_long(unsigned long *v)
{
	bool rc = false;
	unsigned long flags;

	/* arch_get_random_long() disabled ? */
	if (rndlong_drbg_reseed < 0)
		return false;

	/* try to lock the random long lock */
	if (!spin_trylock_irqsave(&rndlong_lock, flags))
		return false;

	if (--rndlong_buf_index >= 0) {
		/* deliver next long value from the buffer */
		*v = rndlong_buf[rndlong_buf_index];
		rc = true;
		goto out;
	}

	/* buffer is depleted and needs refill */
	if (in_interrupt()) {
		/* delay refill in interrupt context to next caller */
		rndlong_buf_index = 0;
		goto out;
	}

	/* refill random long buffer */
	refill_rndlong_buf();
	rndlong_buf_index = ARRAY_SIZE(rndlong_buf);

	/* and provide one random long */
	*v = rndlong_buf[--rndlong_buf_index];
	rc = true;

out:
	spin_unlock_irqrestore(&rndlong_lock, flags);
	return rc;
}
EXPORT_SYMBOL(s390_arch_get_random_long);

static int __init s390_arch_random_init(void)
{
	/* all the needed PRNO subfunctions available ? */
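Editor's note: the entropy arithmetic in the comment block above can be checked with a standalone sketch (plain userspace C, not kernel code; constants mirror the 64-byte seed and the 512-long buffer shown above, assuming 8-byte longs as on s390x):

#include <stdio.h>

int main(void)
{
	const long seed_bits = 64 * 8;		/* one trng seed = 512 bits */
	const long buf_bytes = 512 * 8;		/* rndlong_buf = 4 KB */
	const long reseed = 256;		/* default rndlong_drbg_reseed */

	long span = reseed * buf_bytes;		/* drbg output between reseeds */
	long longs = span / 8;			/* arch_get_random_long() calls */

	printf("%ld bits of fresh entropy per %ld KB (%ld longs)\n",
	       seed_bits, span / 1024, longs);	/* 512 bits per 1024 KB, 131072 longs */
	return 0;
}
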
5 changes: 4 additions & 1 deletion arch/s390/include/asm/archrandom.h
@@ -2,7 +2,7 @@
/*
 * Kernel interface for the s390 arch_random_* functions
 *
- * Copyright IBM Corp. 2017
+ * Copyright IBM Corp. 2017, 2020
 *
 * Author: Harald Freudenberger <freude@de.ibm.com>
 *
@@ -19,6 +19,7 @@
DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
extern atomic64_t s390_arch_random_counter;

bool s390_arch_get_random_long(unsigned long *v);
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);

static inline bool arch_has_random(void)
@@ -35,6 +36,8 @@ static inline bool arch_has_random_seed(void)

static inline bool arch_get_random_long(unsigned long *v)
{
	if (static_branch_likely(&s390_arch_random_available))
		return s390_arch_get_random_long(v);
	return false;
}

4 changes: 2 additions & 2 deletions arch/s390/kernel/early.c
@@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void)
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

-	/* Running under KVM? If not we assume z/VM */
+	/* Detect known hypervisors */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
-	else
+	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}

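Editor's note: the magic bytes compared above are EBCDIC text. A standalone sketch decoding them (the table covers only the code points these two comparisons use; illustration, not kernel code):

#include <stdio.h>

static char ebcdic(unsigned char c)
{
	switch (c) {
	case 0xd2: return 'K';
	case 0xe5: return 'V';
	case 0xd4: return 'M';
	case 0xa9: return 'z';
	case 0x61: return '/';
	default:   return '?';
	}
}

int main(void)
{
	const unsigned char kvm[] = { 0xd2, 0xe5, 0xd4 };
	const unsigned char zvm[] = { 0xa9, 0x61, 0xe5, 0xd4 };
	unsigned int i;

	for (i = 0; i < sizeof(kvm); i++)
		putchar(ebcdic(kvm[i]));
	putchar('\n');				/* prints: KVM */
	for (i = 0; i < sizeof(zvm); i++)
		putchar(ebcdic(zvm[i]));
	putchar('\n');				/* prints: z/VM */
	return 0;
}
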
2 changes: 2 additions & 0 deletions arch/s390/kernel/setup.c
@@ -1027,6 +1027,8 @@ void __init setup_arch(char **cmdline_p)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
	else
		pr_info("Linux is running as a guest in 64-bit mode\n");

	log_component_list();
	init_lockdown();
2 changes: 1 addition & 1 deletion arch/x86/include/asm/cpufeatures.h
@@ -218,7 +218,7 @@
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 or above (Zen) */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
3 changes: 2 additions & 1 deletion arch/x86/kernel/cpu/amd.c
@@ -889,7 +889,8 @@ static void init_amd(struct cpuinfo_x86 *c)
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
-	case 0x17: init_amd_zn(c); break;
+	case 0x17: fallthrough;
+	case 0x19: init_amd_zn(c); break;
	}

/*
2 changes: 1 addition & 1 deletion arch/x86/kvm/cpuid.c
@@ -244,7 +244,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
-	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;
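
Editor's note: dropping the '&' matters because vcpu->arch.cpuid_entries is now a dynamically allocated pointer rather than an embedded array (an inference from this fix). With an array, arr and &arr have the same address value; with a pointer, &ptr is the address of the pointer variable itself, so copying from it would leak stack bytes instead of the intended entries. A standalone illustration:

#include <stdio.h>

int main(void)
{
	int arr[4];
	int *ptr = arr;

	/* For an array, arr and &arr evaluate to the same address. */
	printf("arr=%p &arr=%p\n", (void *)arr, (void *)&arr);

	/* For a pointer, &ptr is where the pointer lives, not the data. */
	printf("ptr=%p &ptr=%p\n", (void *)ptr, (void *)&ptr);
	return 0;
}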
14 changes: 14 additions & 0 deletions arch/x86/kvm/cpuid.h
@@ -255,6 +255,20 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
	return x86_stepping(best->eax);
}

static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
5 changes: 1 addition & 4 deletions arch/x86/kvm/ioapic.c
@@ -197,12 +197,9 @@ static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)

		/*
		 * If no longer has pending EOI in LAPICs, update
-		 * EOI for this vetor.
+		 * EOI for this vector.
		 */
		rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
-		kvm_ioapic_update_eoi_one(vcpu, ioapic,
-					  entry->fields.trig_mode,
-					  irq);
		break;
	}
}
11 changes: 9 additions & 2 deletions arch/x86/kvm/mmu.h
@@ -43,12 +43,19 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

-static inline u64 rsvd_bits(int s, int e)
+static __always_inline u64 rsvd_bits(int s, int e)
{
+	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
+
+	if (__builtin_constant_p(e))
+		BUILD_BUG_ON(e > 63);
+	else
+		e &= 63;
+
+	if (e < s)
+		return 0;
+
-	return ((1ULL << (e - s + 1)) - 1) << s;
+	return ((2ULL << (e - s)) - 1) << s;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
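
Editor's note: the rewritten return avoids an undefined shift when the requested range spans all 64 bits. With s = 0, e = 63 the old form computes 1ULL << 64 (shift count equal to the type width, undefined in C), while 2ULL << 63 wraps to 0 under well-defined unsigned arithmetic, so subtracting 1 yields the intended all-ones mask. A standalone sketch mirroring the new logic (hypothetical wrapper, not the kernel function):

#include <stdint.h>
#include <stdio.h>

static uint64_t rsvd_bits_new(int s, int e)
{
	if (e < s)
		return 0;
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	/* full range: previously UB, now a well-defined all-ones mask */
	printf("rsvd_bits(0, 63)  = %#llx\n",
	       (unsigned long long)rsvd_bits_new(0, 63));
	/* typical use: bits 52..62 set */
	printf("rsvd_bits(52, 62) = %#llx\n",
	       (unsigned long long)rsvd_bits_new(52, 62));
	return 0;
}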
11 changes: 11 additions & 0 deletions arch/x86/kvm/svm/nested.c
@@ -203,6 +203,10 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!nested_svm_vmrun_msrpm(svm)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
@@ -574,6 +578,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
	svm->nested.vmcb = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

@@ -693,6 +699,7 @@ void svm_leave_nested(struct vcpu_svm *svm)
		leave_guest_mode(&svm->vcpu);
		copy_vmcb_control_area(&vmcb->control, &hsave->control);
		nested_svm_uninit_mmu_context(&svm->vcpu);
		vmcb_mark_all_dirty(svm->vmcb);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
@@ -1135,6 +1142,10 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
	 * in the registers, the save area of the nested state instead
	 * contains saved L1 state.
	 */

	svm->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
	hsave->save = *save;

2 changes: 1 addition & 1 deletion arch/x86/kvm/svm/sev.c
@@ -638,8 +638,8 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
	 * Its safe to read more than we are asked, caller should ensure that
	 * destination has enough space.
	 */
-	src_paddr = round_down(src_paddr, 16);
	offset = src_paddr & 15;
+	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
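
Editor's note: the two statements are swapped because rounding first zeroes the offset -- round_down(src_paddr, 16) & 15 is always 0, so unaligned reads lost their low-order bits. A standalone sketch with a hypothetical address:

#include <stdio.h>

#define round_down(x, y) ((x) & ~((unsigned long)(y) - 1))

int main(void)
{
	unsigned long src_paddr = 0x1003, offset;

	/* old order: round first, then mask -- offset is always 0 */
	offset = round_down(src_paddr, 16) & 15;
	printf("old order: offset = %lu\n", offset);		/* 0 */

	/* fixed order: capture the offset before rounding down */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	printf("new order: offset = %lu, src_paddr = %#lx\n",
	       offset, src_paddr);				/* 3, 0x1000 */
	return 0;
}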