Commit
Changes representative of linux-3.10.0-1062.12.1.el7.tar.xz
da-x committed Dec 12, 2019
1 parent 23a592e commit d805327
Showing 37 changed files with 864 additions and 204 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -5,7 +5,7 @@ EXTRAVERSION =
NAME = Unicycling Gorilla
RHEL_MAJOR = 7
RHEL_MINOR = 7
RHEL_RELEASE = 1062.9.1
RHEL_RELEASE = 1062.12.1

#
# DRM backport version
166 changes: 164 additions & 2 deletions arch/powerpc/platforms/pseries/lpar.c
@@ -64,6 +64,22 @@ EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

/*
* H_BLOCK_REMOVE supported block size for a given page size in a segment
* whose base page size is that page size.
*
* The first index is the segment base page size, the second one is the actual
* page size.
*/
static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT];

/*
* Due to the involved complexity, and because the current hypervisor only
* returns this value or 0, we limit H_BLOCK_REMOVE support to a block size
* of 8.
*/
#define HBLKRM_SUPPORTED_BLOCK_SIZE 8

extern void pSeries_find_serial_port(void);

void vpa_init(int cpu)
@@ -433,6 +449,17 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
#define HBLKR_CTRL_ERRNOTFOUND 0x8800000000000000UL
#define HBLKR_CTRL_ERRBUSY 0xa000000000000000UL

/*
* Return true if this block size is supported for the specified segment
* base page size and actual page size.
*
* Currently, only a block size of 8 is supported.
*/
static inline bool is_supported_hlbkrm(int bpsize, int psize)
{
return (hblkrm_size[bpsize][psize] == HBLKRM_SUPPORTED_BLOCK_SIZE);
}

/**
* H_BLOCK_REMOVE caller.
* @idx should point to the latest @param entry set with a PTEX.
@@ -591,7 +618,8 @@ static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
if (lock_tlbie)
spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
/* Assuming THP size is 16M */
if (is_supported_hlbkrm(psize, MMU_PAGE_16M))
hugepage_block_invalidate(slot, vpn, count, psize, ssize);
else
hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);
@@ -747,6 +775,140 @@ static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
(void)call_block_remove(pix, param, true);
}

/*
* TLB Block Invalidate Characteristics
*
* These characteristics define the size of the block the hcall H_BLOCK_REMOVE
* is able to process for each (segment base page size, actual page size)
* couple.
*
* The ibm,get-system-parameter RTAS call returns a buffer with the
* following layout:
*
* [ 2 bytes size of the RTAS buffer (excluding these 2 bytes) ]
* -----------------
* TLB Block Invalidate Specifiers:
* [ 1 byte LOG base 2 of the TLB invalidate block size being specified ]
* [ 1 byte Number of page sizes (N) that are supported for the specified
* TLB invalidate block size ]
* [ 1 byte Encoded segment base page size and actual page size
* MSB=0 means 4k segment base page size and actual page size
* MSB=1 the penc value in mmu_psize_def ]
* ...
* -----------------
* Next TLB Block Invalidate Specifiers...
* -----------------
* [ 0 ]
*/
static inline void set_hblkrm_bloc_size(int bpsize, int psize,
unsigned int block_size)
{
if (block_size > hblkrm_size[bpsize][psize])
hblkrm_size[bpsize][psize] = block_size;
}

/*
* Decode the Encoded segment base page size and actual page size.
* PAPR specifies:
* - bit 7 is the L bit
* - bits 0-5 are the penc value
* If the L bit is 0, this means 4K segment base page size and actual page size,
* otherwise the penc value should be read.
*/
#define HBLKRM_L_MASK 0x80
#define HBLKRM_PENC_MASK 0x3f
static inline void __init check_lp_set_hblkrm(unsigned int lp,
unsigned int block_size)
{
unsigned int bpsize, psize;

/* First, check the L bit, if not set, this means 4K */
if ((lp & HBLKRM_L_MASK) == 0) {
set_hblkrm_bloc_size(MMU_PAGE_4K, MMU_PAGE_4K, block_size);
return;
}

lp &= HBLKRM_PENC_MASK;
for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) {
struct mmu_psize_def *def = &mmu_psize_defs[bpsize];

for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
if (def->penc[psize] == lp) {
set_hblkrm_bloc_size(bpsize, psize, block_size);
return;
}
}
}
}

#define SPLPAR_TLB_BIC_TOKEN 50

/*
* The size of the TLB Block Invalidate Characteristics is variable. But at the
* maximum it will be the number of possible page sizes *2 + 10 bytes.
* Currently MMU_PAGE_COUNT is 16, which means 42 bytes. Use a cache line size
* (128 bytes) for the buffer to get plenty of space.
*/
#define SPLPAR_TLB_BIC_MAXLENGTH 128

void __init pseries_lpar_read_hblkrm_characteristics(void)
{
unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
int call_status, len, idx, bpsize;

if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
return;

spin_lock(&rtas_data_buf_lock);
memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
NULL,
SPLPAR_TLB_BIC_TOKEN,
__pa(rtas_data_buf),
RTAS_DATA_BUF_SIZE);
memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH);
local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0';
spin_unlock(&rtas_data_buf_lock);

if (call_status != 0) {
pr_warn("%s %s Error calling get-system-parameter (0x%x)\n",
__FILE__, __func__, call_status);
return;
}

/*
* The first two (2) bytes of the data in the buffer are the length of
* the returned data, not counting these first two (2) bytes.
*/
len = be16_to_cpu(*((u16 *)local_buffer)) + 2;
if (len > SPLPAR_TLB_BIC_MAXLENGTH) {
pr_warn("%s too large returned buffer %d", __func__, len);
return;
}

idx = 2;
while (idx < len) {
u8 block_shift = local_buffer[idx++];
u32 block_size;
unsigned int npsize;

if (!block_shift)
break;

block_size = 1 << block_shift;

for (npsize = local_buffer[idx++];
npsize > 0 && idx < len; npsize--)
check_lp_set_hblkrm((unsigned int) local_buffer[idx++],
block_size);
}

for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
for (idx = 0; idx < MMU_PAGE_COUNT; idx++)
if (hblkrm_size[bpsize][idx])
pr_info("H_BLOCK_REMOVE supports base psize:%d psize:%d block size:%d",
bpsize, idx, hblkrm_size[bpsize][idx]);
}

/*
* Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
* lock.
@@ -766,7 +928,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
if (lock_tlbie)
spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) {
if (is_supported_hlbkrm(batch->psize, batch->psize)) {
do_block_remove(number, batch, param);
goto out;
}
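The buffer format documented in the TLB Block Invalidate Characteristics comment above can be illustrated with a small stand-alone, user-space sketch that walks a buffer in that layout. The sample bytes below are hypothetical, and the logic mirrors, but is not, the kernel parser pseries_lpar_read_hblkrm_characteristics() from the hunk above.

#include <stdio.h>
#include <stdint.h>

/*
 * Illustration only: decode a hypothetical buffer laid out as described in
 * the TLB Block Invalidate Characteristics comment in lpar.c. The byte
 * values are made up for the example.
 */
int main(void)
{
	const uint8_t buf[] = { 0x00, 0x05,	/* big-endian length: 5 bytes follow */
				0x03,		/* log2(block size) = 3 -> size 8   */
				0x02,		/* two page-size bytes follow        */
				0x00,		/* MSB=0: 4K base / 4K actual        */
				0x8b,		/* MSB=1: penc value 0x0b            */
				0x00 };		/* zero shift terminates the list    */
	unsigned int len = (((unsigned int)buf[0] << 8) | buf[1]) + 2;
	unsigned int idx = 2;

	while (idx < len) {
		uint8_t shift = buf[idx++];
		unsigned int block_size, npsize;

		if (!shift)			/* end of specifiers */
			break;
		block_size = 1u << shift;

		for (npsize = buf[idx++]; npsize > 0 && idx < len; npsize--) {
			uint8_t lp = buf[idx++];

			if (!(lp & 0x80))	/* HBLKRM_L_MASK clear: 4K/4K */
				printf("4K/4K: block size %u\n", block_size);
			else
				printf("penc 0x%02x: block size %u\n",
				       (unsigned int)(lp & 0x3f), block_size);
		}
	}
	return 0;
}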
1 change: 1 addition & 0 deletions arch/powerpc/platforms/pseries/pseries.h
@@ -95,5 +95,6 @@ unsigned long pseries_memory_block_size(void);
int dlpar_workqueue_init(void);

void pseries_setup_rfi_flush(void);
void pseries_lpar_read_hblkrm_characteristics(void);

#endif /* _PSERIES_PSERIES_H */
1 change: 1 addition & 0 deletions arch/powerpc/platforms/pseries/setup.c
@@ -631,6 +631,7 @@ static void __init pSeries_setup_arch(void)

pseries_setup_rfi_flush();
setup_stf_barrier();
pseries_lpar_read_hblkrm_characteristics();

/* By default, only probe PCI (can be overriden by rtas_pci) */
pci_add_flags(PCI_PROBE_ONLY);
8 changes: 4 additions & 4 deletions arch/x86/include/asm/atomic.h
@@ -48,7 +48,7 @@ static __always_inline void atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
: "ir" (i));
: "ir" (i) : "memory");
}

/**
@@ -62,7 +62,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
: "ir" (i));
: "ir" (i) : "memory");
}

/**
@@ -93,7 +93,7 @@ static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
static __always_inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
: "+m" (v->counter) :: "memory");
}

/**
@@ -105,7 +105,7 @@ static __always_inline void atomic_inc(atomic_t *v)
static __always_inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
: "+m" (v->counter) :: "memory");
}

/**
8 changes: 4 additions & 4 deletions arch/x86/include/asm/atomic64_64.h
@@ -44,7 +44,7 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "addq %1,%0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter));
: "er" (i), "m" (v->counter) : "memory");
}

/**
@@ -58,7 +58,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "subq %1,%0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter));
: "er" (i), "m" (v->counter) : "memory");
}

/**
@@ -90,7 +90,7 @@ static __always_inline void atomic64_inc(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "incq %0"
: "=m" (v->counter)
: "m" (v->counter));
: "m" (v->counter) : "memory");
}

/**
@@ -103,7 +103,7 @@ static __always_inline void atomic64_dec(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "decq %0"
: "=m" (v->counter)
: "m" (v->counter));
: "m" (v->counter) : "memory");
}

/**
4 changes: 2 additions & 2 deletions arch/x86/include/asm/barrier.h
@@ -115,8 +115,8 @@ do { \
#endif

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()
#define smp_mb__before_atomic() do { } while (0)
#define smp_mb__after_atomic() do { } while (0)

/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
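The added "memory" clobbers in atomic.h/atomic64_64.h and the relaxation of smp_mb__before_atomic()/smp_mb__after_atomic() above go together: a LOCK-prefixed read-modify-write is already a full memory barrier on x86, and the clobber makes each asm statement a compiler barrier as well, so no separate barrier() is needed around the atomics. Below is a minimal user-space sketch of the same construct; it is illustrative only (my_atomic_inc() is not a kernel symbol) and assumes GCC-style inline asm on x86-64.

#include <stdio.h>

/*
 * User-space analogue of the patched atomic_inc() (illustrative, not kernel
 * code). The LOCK prefix makes the increment an atomic, fully ordered
 * read-modify-write on x86; the "memory" clobber additionally makes the
 * statement a compiler barrier, so the compiler cannot cache memory values
 * or move loads/stores across it. That combination is why the
 * smp_mb__before_atomic()/smp_mb__after_atomic() helpers can be empty.
 */
static inline void my_atomic_inc(int *v)
{
	asm volatile("lock incl %0"
		     : "+m" (*v)
		     :			/* no inputs */
		     : "memory");	/* compiler barrier */
}

int main(void)
{
	int counter = 0;

	my_atomic_inc(&counter);
	printf("counter = %d\n", counter);	/* prints: counter = 1 */
	return 0;
}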
4 changes: 2 additions & 2 deletions arch/x86/include/asm/cpuidle_haltpoll.h
@@ -2,7 +2,7 @@
#ifndef _ARCH_HALTPOLL_H
#define _ARCH_HALTPOLL_H

void arch_haltpoll_enable(void);
void arch_haltpoll_disable(void);
void arch_haltpoll_enable(unsigned int cpu);
void arch_haltpoll_disable(unsigned int cpu);

#endif
18 changes: 6 additions & 12 deletions arch/x86/kernel/kvm.c
@@ -939,32 +939,26 @@ static void kvm_enable_host_haltpoll(void *i)
wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(void)
void arch_haltpoll_enable(unsigned int cpu)
{
if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
printk(KERN_ERR "kvm: host does not support poll control\n");
printk(KERN_ERR "kvm: host upgrade recommended\n");
pr_err_once("kvm: host does not support poll control\n");
pr_err_once("kvm: host upgrade recommended\n");
return;
}

preempt_disable();
/* Enable guest halt poll disables host halt poll */
kvm_disable_host_haltpoll(NULL);
smp_call_function(kvm_disable_host_haltpoll, NULL, 1);
preempt_enable();
smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(void)
void arch_haltpoll_disable(unsigned int cpu)
{
if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
return;

preempt_disable();
/* Enable guest halt poll disables host halt poll */
kvm_enable_host_haltpoll(NULL);
smp_call_function(kvm_enable_host_haltpoll, NULL, 1);
preempt_enable();
smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif
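The per-CPU arch_haltpoll_enable()/arch_haltpoll_disable() signatures let a caller target one CPU with smp_call_function_single() instead of broadcasting with smp_call_function() under preempt_disable(). As a hedged sketch only (not code from this commit, and assuming the cpuhp_setup_state() dynamic hotplug API is available in this tree), such a caller could wire the new entry points into CPU hotplug callbacks:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <asm/cpuidle_haltpoll.h>

/* Sketch only: hypothetical hotplug callbacks using the per-CPU API. */
static int haltpoll_sketch_cpu_online(unsigned int cpu)
{
	arch_haltpoll_enable(cpu);	/* guest polls, host halt poll off for this CPU */
	return 0;
}

static int haltpoll_sketch_cpu_offline(unsigned int cpu)
{
	arch_haltpoll_disable(cpu);	/* restore host-side halt polling */
	return 0;
}

static int __init haltpoll_sketch_init(void)
{
	/* Registration would normally happen from a driver's init path. */
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "sketch/haltpoll:online",
				 haltpoll_sketch_cpu_online,
				 haltpoll_sketch_cpu_offline);
}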
(Diffs for the remaining changed files are not shown here.)
