
Commit

Changes representative of linux-3.10.0-1160.31.1.el7.tar.xz
da-x committed May 26, 2021
1 parent 85c3fa7 commit 0db90f8
Showing 48 changed files with 659 additions and 295 deletions.
3 changes: 2 additions & 1 deletion .gitlab-ci-private.yml
@@ -26,7 +26,8 @@ variables:
realtime_check:
variables:
name: kernel-rt-rhel7
merge_tree: ${CI_MERGE_REQUEST_PROJECT_URL}
merge_tree: ${CI_MERGE_REQUEST_PROJECT_URL}.git
merge_tree_cache_owner: kernel
merge_branch: main-rt
architectures: 'x86_64'
kpet_tree_family: rhel7-rt
2 changes: 1 addition & 1 deletion .gitlab-ci.yml
@@ -25,7 +25,7 @@ variables:
realtime_check:
variables:
name: kernel-rt-rhel7
merge_tree: ${CI_MERGE_REQUEST_PROJECT_URL}
merge_tree: ${CI_MERGE_REQUEST_PROJECT_URL}.git
merge_branch: main-rt
architectures: 'x86_64'
kpet_tree_family: rhel7-rt
2 changes: 1 addition & 1 deletion Makefile
@@ -5,7 +5,7 @@ EXTRAVERSION =
NAME = Unicycling Gorilla
RHEL_MAJOR = 7
RHEL_MINOR = 9
RHEL_RELEASE = 1160.25.1
RHEL_RELEASE = 1160.31.1

#
# DRM backport version
92 changes: 5 additions & 87 deletions arch/x86/include/asm/mmu_context.h
@@ -102,93 +102,11 @@ static inline void destroy_context(struct mm_struct *mm)
destroy_context_ldt(mm);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();

if (likely(prev != next)) {
#ifdef CONFIG_SMP
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
cpumask_set_cpu(cpu, mm_cpumask(next));

#ifndef CONFIG_PREEMPT_RCU
spec_ctrl_ibpb_if_different_creds(tsk);
#else
spec_ctrl_ibpb();
#endif

/*
* Re-load page tables.
*
* This logic has an ordering constraint:
*
* CPU 0: Write to a PTE for 'next'
* CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
* CPU 1: set bit 1 in next's mm_cpumask
* CPU 1: load from the PTE that CPU 0 writes (implicit)
*
* We need to prevent an outcome in which CPU 1 observes
* the new PTE value and CPU 0 observes bit 1 clear in
* mm_cpumask. (If that occurs, then the IPI will never
* be sent, and CPU 0's TLB will contain a stale entry.)
*
* The bad outcome can occur if either CPU's load is
* reordered before that CPU's store, so both CPUs must
* execute full barriers to prevent this from happening.
*
* Thus, switch_mm needs a full barrier between the
* store to mm_cpumask and any operation that could load
* from next->pgd. TLB fills are special and can happen
* due to instruction fetches or for no reason at all,
* and neither LOCK nor MFENCE orders them.
* Fortunately, load_cr3() is serializing and gives the
* ordering guarantee we need.
*
*/
load_cr3(next->pgd);

/* Stop flush ipis for the previous mm */
cpumask_clear_cpu(cpu, mm_cpumask(prev));

/* Load the LDT, if the LDT is different:
* never set context.ldt to NULL while the mm still
* exists. That means that next->context.ldt !=
* prev->context.ldt, because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_mm_ldt(next);
}
#ifdef CONFIG_SMP
else {
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
/*
* On established mms, the mm_cpumask is only changed
* from irq context, from ptep_clear_flush() while in
* lazy tlb mode, and here. Irqs are blocked during
* schedule, protecting us from simultaneous changes.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));

/*
* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*
* As above, load_cr3() is serializing and orders TLB
* fills with respect to the mm_cpumask write.
*/
load_cr3(next->pgd);
load_mm_ldt(next);
}
}
#endif
}
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next) \
do { \
3 changes: 1 addition & 2 deletions arch/x86/mm/Makefile
@@ -1,5 +1,5 @@
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
pat.o pgtable.o physaddr.o gup.o setup_nx.o
pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o

# Make sure __phys_addr has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
@@ -9,7 +9,6 @@ CFLAGS_setup_nx.o := $(nostackp)
CFLAGS_fault.o := -I$(src)/../include/asm/trace

obj-$(CONFIG_X86_PAT) += pat_rbtree.o
obj-$(CONFIG_SMP) += tlb.o

obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o

105 changes: 105 additions & 0 deletions arch/x86/mm/tlb.c
@@ -31,6 +31,8 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
* Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
*/

#ifdef CONFIG_SMP

struct flush_tlb_info {
struct mm_struct *flush_mm;
unsigned long flush_start;
@@ -53,6 +55,108 @@ void leave_mm(int cpu)
}
EXPORT_SYMBOL_GPL(leave_mm);

#endif /* CONFIG_SMP */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned long flags;

local_irq_save(flags);
switch_mm_irqs_off(prev, next, tsk);
local_irq_restore(flags);
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();

if (likely(prev != next)) {
#ifdef CONFIG_SMP
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
cpumask_set_cpu(cpu, mm_cpumask(next));

#ifndef CONFIG_PREEMPT_RCU
spec_ctrl_ibpb_if_different_creds(tsk);
#else
spec_ctrl_ibpb();
#endif

/*
* Re-load page tables.
*
* This logic has an ordering constraint:
*
* CPU 0: Write to a PTE for 'next'
* CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
* CPU 1: set bit 1 in next's mm_cpumask
* CPU 1: load from the PTE that CPU 0 writes (implicit)
*
* We need to prevent an outcome in which CPU 1 observes
* the new PTE value and CPU 0 observes bit 1 clear in
* mm_cpumask. (If that occurs, then the IPI will never
* be sent, and CPU 0's TLB will contain a stale entry.)
*
* The bad outcome can occur if either CPU's load is
* reordered before that CPU's store, so both CPUs must
* execute full barriers to prevent this from happening.
*
* Thus, switch_mm needs a full barrier between the
* store to mm_cpumask and any operation that could load
* from next->pgd. TLB fills are special and can happen
* due to instruction fetches or for no reason at all,
* and neither LOCK nor MFENCE orders them.
* Fortunately, load_cr3() is serializing and gives the
* ordering guarantee we need.
*
*/
load_cr3(next->pgd);

/* Stop flush ipis for the previous mm */
cpumask_clear_cpu(cpu, mm_cpumask(prev));

/* Load the LDT, if the LDT is different:
* never set context.ldt to NULL while the mm still
* exists. That means that next->context.ldt !=
* prev->context.ldt, because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_mm_ldt(next);
}
#ifdef CONFIG_SMP
else {
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
/*
* On established mms, the mm_cpumask is only changed
* from irq context, from ptep_clear_flush() while in
* lazy tlb mode, and here. Irqs are blocked during
* schedule, protecting us from simultaneous changes.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));

/*
* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*
* As above, load_cr3() is serializing and orders TLB
* fills with respect to the mm_cpumask write.
*/
load_cr3(next->pgd);
load_mm_ldt(next);
}
}
#endif
}

#ifdef CONFIG_SMP

/*
* The flush IPI assumes that a thread switch happens in this order:
* [cpu0: the cpu that switches]
@@ -275,6 +379,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
on_each_cpu(do_kernel_range_flush, &info, 1);
}
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_DEBUG_TLBFLUSH
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
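
The split into switch_mm() and switch_mm_irqs_off() mirrors the upstream interface: the former is a thin wrapper that disables interrupts, the latter is for callers that already run with interrupts off (the scheduler's context switch is the upstream example). A minimal, hedged sketch of such a caller; example_switch_with_irqs_off() is illustrative and not part of this commit:

/*
 * Sketch only: a caller that already holds interrupts disabled can use
 * switch_mm_irqs_off() directly and skip the redundant
 * local_irq_save()/local_irq_restore() pair that switch_mm() performs.
 */
static void example_switch_with_irqs_off(struct mm_struct *prev,
                                         struct mm_struct *next)
{
        WARN_ON_ONCE(!irqs_disabled());         /* caller's responsibility */
        switch_mm_irqs_off(prev, next, current);
}
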
12 changes: 12 additions & 0 deletions arch/x86/platform/efi/efi_64.c
@@ -435,6 +435,18 @@ void efi_switch_mm(struct mm_struct *mm)
efi_scratch.prev_mm = current->active_mm;
current->active_mm = mm;
switch_mm(efi_scratch.prev_mm, mm, NULL);
/*
* RHEL-7: switch_mm() will prematurely flip cpu_tlbstate
* back to TLBSTATE_OK for this kernel thread, which will
* potentially trigger the assertion at leave_mm(), if the
* work queued to run after the EFI thunk happens to initiate
* a TLB flush (i.e.: if a flush worker is queued after the
* efivars read/write work). We just need to make sure we're
* setting cpu_tlbstate back to TLBSTATE_LAZY after flipping
* back and forth the page tables, as in RHEL-7 kernel threads
* are expected to be in TLBSTATE_LAZY state all the way.
*/
enter_lazy_tlb(mm, current);
}

#ifdef CONFIG_EFI_MIXED
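
For context on the efi_switch_mm() hunk above, a hedged usage sketch of the pattern it supports. Only efi_switch_mm() and efi_scratch.prev_mm appear in this diff; efi_mm and example_runtime_service() are assumed stand-ins for the EFI mm and the real efi_call_*() invocation:

/*
 * Sketch only: bracket an EFI runtime service with efi_switch_mm().
 * With the RHEL-7 change above, each switch ends in enter_lazy_tlb(),
 * so the calling kernel thread is back in TLBSTATE_LAZY on both sides
 * of the call and a later leave_mm() from a queued flush worker cannot
 * trip its TLBSTATE_OK assertion.
 */
static efi_status_t example_runtime_service(void);     /* hypothetical */

static efi_status_t example_efi_runtime_call(void)
{
        efi_status_t status;

        efi_switch_mm(&efi_mm);                 /* assumed: the EFI mm       */
        status = example_runtime_service();     /* hypothetical runtime call */
        efi_switch_mm(efi_scratch.prev_mm);     /* restore the previous mm   */

        return status;
}
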
12 changes: 0 additions & 12 deletions block/blk-core.c
@@ -2595,18 +2595,6 @@ void blk_account_io_start(struct request *rq, bool new_io)
part_stat_inc(cpu, part, merges[rw]);
} else {
part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
if (!hd_struct_try_get(part)) {
/*
* The partition is already being removed,
* the request will be accounted on the disk only
*
* We take a reference on disk->part0 although that
* partition will never be deleted, so we can treat
* it as any other partition.
*/
part = &rq->rq_disk->part0;
hd_struct_get(part);
}
if (!is_mq)
part_round_stats(rq->q, cpu, part);
part_inc_in_flight(rq->q, part, rw);
15 changes: 12 additions & 3 deletions block/genhd.c
@@ -258,11 +258,12 @@ static inline int sector_in_part(struct hd_struct *part, sector_t sector)
* primarily used for stats accounting.
*
* CONTEXT:
* RCU read locked. The returned partition pointer is valid only
* while preemption is disabled.
* RCU read locked. The returned partition pointer is always valid
* because its refcount is grabbed.
*
* RETURNS:
* Found partition on success, part0 is returned if no partition matches
* or the matched partition is being deleted.
*/
struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
{
@@ -273,17 +274,25 @@ struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
ptbl = rcu_dereference(disk->part_tbl);

part = rcu_dereference(ptbl->last_lookup);
if (part && sector_in_part(part, sector))
if (part && sector_in_part(part, sector) && hd_struct_try_get(part))
return part;

for (i = 1; i < ptbl->len; i++) {
part = rcu_dereference(ptbl->part[i]);

if (part && sector_in_part(part, sector)) {
/*
* only live partition can be cached for lookup,
* so use-after-free on cached & deleting partition
* can be avoided
*/
if (!hd_struct_try_get(part))
break;
rcu_assign_pointer(ptbl->last_lookup, part);
return part;
}
}
hd_struct_get(&disk->part0);
return &disk->part0;
}
EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
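
A hedged sketch of the caller-side contract implied by the new disk_map_sector_rcu() semantics: the returned partition now carries a reference, so the lookup is paired with hd_struct_put() once the per-partition work is done. hd_struct_put() is an existing genhd helper that does not appear in this diff; in the real I/O accounting path the reference is held for the lifetime of the request rather than dropped immediately:

/*
 * Sketch only: look up the partition under RCU, use it, then drop the
 * reference that disk_map_sector_rcu() now takes on the caller's behalf.
 */
static void example_account_sector(struct gendisk *disk, sector_t sector)
{
        struct hd_struct *part;

        rcu_read_lock();
        part = disk_map_sector_rcu(disk, sector);       /* refcount elevated */
        rcu_read_unlock();

        /* ... per-partition accounting on 'part' ... */

        hd_struct_put(part);            /* release the lookup's reference */
}
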
13 changes: 12 additions & 1 deletion block/partition-generic.c
@@ -243,6 +243,13 @@ static void delete_partition_rcu_cb(struct rcu_head *head)

void __delete_partition(struct hd_struct *part)
{
struct gendisk *disk = part_to_disk(part);
struct disk_part_tbl *ptbl =
rcu_dereference_protected(disk->part_tbl, 1);

rcu_assign_pointer(ptbl->last_lookup, NULL);
put_device(disk_to_dev(disk));

call_rcu(&part->rcu_head, delete_partition_rcu_cb);
}

@@ -258,8 +265,12 @@ void delete_partition(struct gendisk *disk, int partno)
if (!part)
return;

/*
* ->part_tbl is referenced in this part's release handler, so
* we have to hold the disk device
*/
get_device(disk_to_dev(part_to_disk(part)));
rcu_assign_pointer(ptbl->part[partno], NULL);
rcu_assign_pointer(ptbl->last_lookup, NULL);
kobject_put(part->holder_dir);
device_del(part_to_dev(part));

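
The get_device()/put_device() pair added above pins the disk because __delete_partition() runs from the partition's release path, potentially long after delete_partition() has returned, and it still dereferences disk->part_tbl to clear last_lookup. A hedged sketch of that release side; example_release_part() is illustrative, and hd_struct_put() is the existing helper that drops a partition reference:

/*
 * Sketch only: dropping the last partition reference is what eventually
 * runs __delete_partition(), which clears ptbl->last_lookup and releases
 * the pinned disk before the RCU-deferred free of 'part'.
 */
static void example_release_part(struct hd_struct *part)
{
        hd_struct_put(part);    /* may be the final reference */
}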