Changes representative of linux-5.14.0-427.20.1.el9_4.tar.xz
da-x committed May 23, 2024
1 parent fae79c4 commit de6aec4
Showing 14 changed files with 186 additions and 86 deletions.
2 changes: 1 addition & 1 deletion Makefile.rhelver
@@ -12,7 +12,7 @@ RHEL_MINOR = 4
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 427.18.1
RHEL_RELEASE = 427.20.1

#
# ZSTREAM
2 changes: 1 addition & 1 deletion arch/x86/include/asm/mce.h
@@ -241,7 +241,7 @@ static inline void cmci_recheck(void) {}
int mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);
bool mce_is_correctable(struct mce *m);
int mce_usable_address(struct mce *m);
bool mce_usable_address(struct mce *m);

DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);
68 changes: 63 additions & 5 deletions arch/x86/kernel/cpu/mce/amd.c
@@ -705,17 +705,75 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
deferred_error_interrupt_enable(c);
}

bool amd_mce_is_memory_error(struct mce *m)
/*
* DRAM ECC errors are reported in the Northbridge (bank 4) with
* Extended Error Code 8.
*/
static bool legacy_mce_is_memory_error(struct mce *m)
{
return m->bank == 4 && XEC(m->status, 0x1f) == 8;
}

/*
* DRAM ECC errors are reported in Unified Memory Controllers with
* Extended Error Code 0.
*/
static bool smca_mce_is_memory_error(struct mce *m)
{
enum smca_bank_types bank_type;
/* ErrCodeExt[20:16] */
u8 xec = (m->status >> 16) & 0x1f;

if (XEC(m->status, 0x3f))
return false;

bank_type = smca_get_bank_type(m->extcpu, m->bank);

return bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2;
}

bool amd_mce_is_memory_error(struct mce *m)
{
if (mce_flags.smca)
return (bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) && xec == 0x0;
return smca_mce_is_memory_error(m);
else
return legacy_mce_is_memory_error(m);
}

/*
* AMD systems do not have an explicit indicator that the value in MCA_ADDR is
* a system physical address. Therefore, individual cases need to be detected.
* Future cases and checks will be added as needed.
*
* 1) General case
* a) Assume address is not usable.
* 2) Poison errors
* a) Indicated by MCA_STATUS[43]: poison. Defined for all banks except legacy
* northbridge (bank 4).
* b) Refers to poison consumption in the core. Does not include "no action",
* "action optional", or "deferred" error severities.
* c) Will include a usable address so that immediate action can be taken.
* 3) Northbridge DRAM ECC errors
* a) Reported in legacy bank 4 with extended error code (XEC) 8.
* b) MCA_STATUS[43] is *not* defined as poison in legacy bank 4. Therefore,
* this bit should not be checked.
*
* NOTE: SMCA UMC memory errors fall into case #1.
*/
bool amd_mce_usable_address(struct mce *m)
{
/* Check special northbridge case 3) first. */
if (!mce_flags.smca) {
if (legacy_mce_is_memory_error(m))
return true;
else if (m->bank == 4)
return false;
}

return m->bank == 4 && xec == 0x8;
/* Check poison bit for all other bank types. */
if (m->status & MCI_STATUS_POISON)
return true;

/* Assume address is not usable for all others. */
return false;
}

static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
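
Editor's note: the bit positions behind the helpers used in this hunk are spelled out in the comments above (ErrCodeExt in MCA_STATUS[20:16], the poison bit in MCA_STATUS[43]). A minimal standalone sketch of the same extraction, with the macro forms treated as assumptions rather than the in-tree definitions:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: ErrCodeExt lives in MCA_STATUS bits [20:16]; the in-tree XEC()
 * macro performs this same shift-and-mask.
 */
#define SKETCH_XEC(status, mask)	(((status) >> 16) & (mask))

/* Sketch: MCA_STATUS[43] is the poison bit checked by amd_mce_usable_address();
 * the real headers name this bit MCI_STATUS_POISON.
 */
#define SKETCH_STATUS_POISON		(1ULL << 43)

/* Mirrors legacy_mce_is_memory_error(): bank 4 + XEC 8 means DRAM ECC. */
static bool sketch_legacy_is_memory_error(int bank, uint64_t status)
{
	return bank == 4 && SKETCH_XEC(status, 0x1f) == 8;
}
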
32 changes: 11 additions & 21 deletions arch/x86/kernel/cpu/mce/core.c
@@ -468,32 +468,22 @@ static void mce_irq_work_cb(struct irq_work *entry)
mce_schedule_work();
}

/*
* Check if the address reported by the CPU is in a format we can parse.
* It would be possible to add code for most other cases, but all would
* be somewhat complicated (e.g. segment offset would require an instruction
* parser). So only support physical addresses up to page granularity for now.
*/
int mce_usable_address(struct mce *m)
bool mce_usable_address(struct mce *m)
{
if (!(m->status & MCI_STATUS_ADDRV))
return 0;

/* Checks after this one are Intel/Zhaoxin-specific: */
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
return 1;

if (!(m->status & MCI_STATUS_MISCV))
return 0;
return false;

if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
return 0;
switch (m->cpuvendor) {
case X86_VENDOR_AMD:
return amd_mce_usable_address(m);

if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
return 0;
case X86_VENDOR_INTEL:
case X86_VENDOR_ZHAOXIN:
return intel_mce_usable_address(m);

return 1;
default:
return true;
}
}
EXPORT_SYMBOL_GPL(mce_usable_address);

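
Editor's note: mce_usable_address() now dispatches per vendor and returns bool. For context, a typical caller pattern looks roughly like the sketch below (loosely modeled on the notifier path that offlines pages; the exact flags and surrounding locking are omitted and should be treated as assumptions):

#include <linux/mm.h>		/* memory_failure() */
#include <asm/mce.h>		/* struct mce, mce_is_memory_error(), mce_usable_address() */

/* Illustrative caller: offline the page behind a memory error only when the
 * reported address is actually usable (assumption: no extra MF_* flags).
 */
static void sketch_offline_bad_page(struct mce *m)
{
	if (!mce_is_memory_error(m) || !mce_usable_address(m))
		return;

	memory_failure(m->addr >> PAGE_SHIFT, 0);
}
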
20 changes: 20 additions & 0 deletions arch/x86/kernel/cpu/mce/intel.c
@@ -536,3 +536,23 @@ bool intel_filter_mce(struct mce *m)

return false;
}

/*
* Check if the address reported by the CPU is in a format we can parse.
* It would be possible to add code for most other cases, but all would
* be somewhat complicated (e.g. segment offset would require an instruction
* parser). So only support physical addresses up to page granularity for now.
*/
bool intel_mce_usable_address(struct mce *m)
{
if (!(m->status & MCI_STATUS_MISCV))
return false;

if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
return false;

if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
return false;

return true;
}
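
Editor's note: the MCA_MISC decoding that intel_mce_usable_address() relies on breaks down roughly as sketched below; the field positions are assumptions based on the x86 MCE header, not part of this diff:

#include <stdbool.h>
#include <stdint.h>

/* Assumed MCA_MISC layout behind MCI_MISC_ADDR_LSB()/MCI_MISC_ADDR_MODE():
 *   bits [5:0] - least significant valid bit of the recoverable address
 *   bits [8:6] - address mode, where 2 denotes a system physical address
 * MCI_STATUS_MISCV (assumed bit 59 of MCA_STATUS) marks MCA_MISC as valid.
 */
#define SKETCH_MISC_ADDR_LSB(misc)	((misc) & 0x3f)
#define SKETCH_MISC_ADDR_MODE(misc)	(((misc) >> 6) & 0x7)
#define SKETCH_ADDR_MODE_PHYS		2
#define SKETCH_STATUS_MISCV		(1ULL << 59)

static bool sketch_intel_usable_address(uint64_t status, uint64_t misc,
					unsigned int page_shift)
{
	if (!(status & SKETCH_STATUS_MISCV))
		return false;
	if (SKETCH_MISC_ADDR_LSB(misc) > page_shift)
		return false;	/* address granularity coarser than a page */
	return SKETCH_MISC_ADDR_MODE(misc) == SKETCH_ADDR_MODE_PHYS;
}
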
4 changes: 4 additions & 0 deletions arch/x86/kernel/cpu/mce/internal.h
@@ -49,6 +49,7 @@ void intel_init_cmci(void);
void intel_init_lmce(void);
void intel_clear_lmce(void);
bool intel_filter_mce(struct mce *m);
bool intel_mce_usable_address(struct mce *m);
#else
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
@@ -58,6 +59,7 @@ static inline void intel_init_cmci(void) { }
static inline void intel_init_lmce(void) { }
static inline void intel_clear_lmce(void) { }
static inline bool intel_filter_mce(struct mce *m) { return false; }
static inline bool intel_mce_usable_address(struct mce *m) { return false; }
#endif

void mce_timer_kick(unsigned long interval);
@@ -204,6 +206,7 @@ extern bool filter_mce(struct mce *m);

#ifdef CONFIG_X86_MCE_AMD
extern bool amd_filter_mce(struct mce *m);
bool amd_mce_usable_address(struct mce *m);

/*
* If MCA_CONFIG[McaLsbInStatusSupported] is set, extract ErrAddr in bits
@@ -231,6 +234,7 @@ static __always_inline void smca_extract_err_addr(struct mce *m)

#else
static inline bool amd_filter_mce(struct mce *m) { return false; }
static inline bool amd_mce_usable_address(struct mce *m) { return false; }
static inline void smca_extract_err_addr(struct mce *m) { }
#endif

6 changes: 3 additions & 3 deletions drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2684,12 +2684,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
lb->loopback = 1;

q = &adap->sge.ethtxq[pi->first_qset];
__netif_tx_lock(q->txq, smp_processor_id());
__netif_tx_lock_bh(q->txq);

reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
__netif_tx_unlock(q->txq);
__netif_tx_unlock_bh(q->txq);
return -ENOMEM;
}

@@ -2724,7 +2724,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
__netif_tx_unlock(q->txq);
__netif_tx_unlock_bh(q->txq);

/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
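
Editor's note: the cxgb4 hunks swap the plain TX queue lock for the BH-disabling variant, so the selftest's critical section cannot race with softirq TX processing on the same CPU. A condensed sketch of the resulting pattern (driver names kept as-is, error handling trimmed):

/* Sketch of the selftest TX path with the BH-disabling lock variant. */
static void sketch_selftest_xmit(struct sge_eth_txq *q)
{
	__netif_tx_lock_bh(q->txq);	/* was: __netif_tx_lock(q->txq, smp_processor_id()) */
	/* ... reclaim completed descriptors, build and ring the loopback packet ... */
	__netif_tx_unlock_bh(q->txq);
}
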
7 changes: 3 additions & 4 deletions drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -328,10 +328,9 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,

if (offload->tso_segs) {
qw1 |= IDPF_TX_CTX_DESC_TSO << IDPF_TXD_CTX_QW1_CMD_S;
qw1 |= ((u64)offload->tso_len << IDPF_TXD_CTX_QW1_TSO_LEN_S) &
IDPF_TXD_CTX_QW1_TSO_LEN_M;
qw1 |= ((u64)offload->mss << IDPF_TXD_CTX_QW1_MSS_S) &
IDPF_TXD_CTX_QW1_MSS_M;
qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_TSO_LEN_M,
offload->tso_len);
qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss);

u64_stats_update_begin(&txq->stats_sync);
u64_stats_inc(&txq->q_stats.tx.lso_pkts);
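
Editor's note: FIELD_PREP() from <linux/bitfield.h> replaces the open-coded shift-and-mask by deriving the shift from the mask itself. A minimal equivalence sketch using a hypothetical field (the mask and shift values below are illustrative, not the driver's real IDPF_TXD_* definitions):

#include <stdint.h>

/* Hypothetical 10-bit field occupying bits [25:16] of a descriptor quad-word. */
#define SKETCH_FIELD_SHIFT	16
#define SKETCH_FIELD_MASK	(0x3FFULL << SKETCH_FIELD_SHIFT)

/* The open-coded form the driver used before this change. */
static inline uint64_t sketch_open_coded(uint64_t val)
{
	return (val << SKETCH_FIELD_SHIFT) & SKETCH_FIELD_MASK;
}

/* What FIELD_PREP(mask, val) does conceptually: shift by the mask's trailing
 * zero count, then mask the result. FIELD_GET() is the inverse extraction.
 */
static inline uint64_t sketch_field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}
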
62 changes: 29 additions & 33 deletions drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -505,9 +505,9 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)

/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
((buf_id << IDPF_RX_BI_BUFID_S) & IDPF_RX_BI_BUFID_M) |
(!!(test_bit(__IDPF_Q_GEN_CHK, refillq->flags)) <<
IDPF_RX_BI_GEN_S);
FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
FIELD_PREP(IDPF_RX_BI_GEN_M,
test_bit(__IDPF_Q_GEN_CHK, refillq->flags));

if (unlikely(++nta == refillq->desc_count)) {
nta = 0;
@@ -1828,14 +1828,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
u16 gen;

/* if the descriptor isn't done, no work yet to do */
gen = (le16_to_cpu(tx_desc->qid_comptype_gen) &
IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
gen = le16_get_bits(tx_desc->qid_comptype_gen,
IDPF_TXD_COMPLQ_GEN_M);
if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
break;

/* Find necessary info of TX queue to clean buffers */
rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) &
IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
IDPF_TXD_COMPLQ_QID_M);
if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
dev_err(&complq->vport->adapter->pdev->dev,
@@ -1845,9 +1845,8 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
tx_q = complq->txq_grp->txqs[rel_tx_qid];

/* Determine completion type */
ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) &
IDPF_TXD_COMPLQ_COMPL_TYPE_M) >>
IDPF_TXD_COMPLQ_COMPL_TYPE_S;
ctype = le16_get_bits(tx_desc->qid_comptype_gen,
IDPF_TXD_COMPLQ_COMPL_TYPE_M);
switch (ctype) {
case IDPF_TXD_COMPLT_RE:
hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
@@ -1948,11 +1947,10 @@ void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
u16 td_cmd, u16 size)
{
desc->q.qw1.cmd_dtype =
cpu_to_le16(params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M);
le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
desc->q.qw1.cmd_dtype |=
cpu_to_le16((td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) &
IDPF_FLEX_TXD_QW1_CMD_M);
desc->q.qw1.buf_size = cpu_to_le16((u16)size);
le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
desc->q.qw1.buf_size = cpu_to_le16(size);
desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
}

@@ -2846,8 +2844,9 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n
qword1);
csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
qword0);
csum->raw_csum_inv = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M,
le16_to_cpu(rx_desc->ptype_err_fflags0));
csum->raw_csum_inv =
le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
}

@@ -2941,8 +2940,10 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
struct idpf_rx_ptype_decoded decoded;
u16 rx_ptype;

rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M,
le16_to_cpu(rx_desc->ptype_err_fflags0));
rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);

skb->protocol = eth_type_trans(skb, rxq->vport->netdev);

decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
/* If we don't know the ptype we can't do anything else with it. Just
@@ -2954,10 +2955,8 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
/* process RSS/hash */
idpf_rx_hash(rxq, skb, rx_desc, &decoded);

skb->protocol = eth_type_trans(skb, rxq->vport->netdev);

if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M,
le16_to_cpu(rx_desc->hdrlen_flags)))
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);

idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
@@ -3151,8 +3150,8 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
dma_rmb();

/* if the descriptor isn't done, no work yet to do */
gen_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
gen_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, gen_id);
gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);

if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
break;
@@ -3167,9 +3166,8 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
continue;
}

pkt_len = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M,
pkt_len);
pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);

hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
rx_desc->status_err0_qw1);
@@ -3186,14 +3184,12 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
goto bypass_hsplit;
}

hdr_len = le16_to_cpu(rx_desc->hdrlen_flags);
hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M,
hdr_len);
hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M);

bypass_hsplit:
bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M,
bufq_id);
bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);

rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
if (!bufq_id)
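
Editor's note: the idpf_txrx.c hunks fold the explicit endianness conversion into the bitfield helpers, which is why the le16_to_cpu()/cpu_to_le16() calls disappear. A standalone sketch of the semantics (conceptual only; the real helpers live in <linux/bitfield.h>):

#include <stdint.h>

typedef uint16_t sketch_le16;	/* stand-in for __le16 on a little-endian build */

/* le16_get_bits(x, MASK): convert x to CPU order, then extract the field.
 * le16_encode_bits(v, MASK): place v into the field, then convert back.
 * On the little-endian stand-in above, the byte swap is a no-op.
 */
static inline uint16_t sketch_le16_get_bits(sketch_le16 x, uint16_t mask)
{
	return (uint16_t)((x & mask) >> __builtin_ctz(mask));
}

static inline sketch_le16 sketch_le16_encode_bits(uint16_t val, uint16_t mask)
{
	return (sketch_le16)((val << __builtin_ctz(mask)) & mask);
}
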
8 changes: 5 additions & 3 deletions drivers/pci/pci-driver.c
@@ -324,11 +324,13 @@ static long local_pci_probe(void *_ddi)
pci_dev->driver = pci_drv;

#ifdef CONFIG_RHEL_DIFFERENCES
if (pci_rh_check_status(pci_dev))
return -EACCES;
rc = -EACCES;
if (!pci_rh_check_status(pci_dev))
rc = pci_drv->probe(pci_dev, ddi->id);
#else
rc = pci_drv->probe(pci_dev, ddi->id);
#endif

rc = pci_drv->probe(pci_dev, ddi->id);
if (!rc)
return rc;
if (rc < 0) {
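
Editor's note: the reshuffled #ifdef avoids the early return -EACCES that previously bailed out after pci_dev->driver had already been set; rc now starts at -EACCES and is only overwritten when the RHEL status check passes, so both configurations fall through to the common rc handling. A condensed sketch of the resulting flow (runtime-PM handling and the later rc checks omitted):

/* Sketch of local_pci_probe()'s probe invocation after this change. */
static long sketch_local_pci_probe(struct pci_dev *pci_dev, struct pci_driver *pci_drv,
				   const struct pci_device_id *id)
{
	long rc;

#ifdef CONFIG_RHEL_DIFFERENCES
	/* Seed rc with -EACCES; only a passing status check reaches probe(). */
	rc = -EACCES;
	if (!pci_rh_check_status(pci_dev))
		rc = pci_drv->probe(pci_dev, id);
#else
	rc = pci_drv->probe(pci_dev, id);
#endif

	return rc;
}
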