Commit 7238569

Changes representative of linux-3.10.0-957.21.3.el7.tar.xz
da-x committed Jun 14, 2019
1 parent fb2eb75 commit 7238569
Showing 13 changed files with 62 additions and 11 deletions.
8 changes: 8 additions & 0 deletions Documentation/networking/ip-sysctl.txt
@@ -214,6 +214,14 @@ tcp_base_mss - INTEGER
Path MTU discovery (MTU probing). If MTU probing is enabled,
this is the initial MSS used by the connection.

tcp_min_snd_mss - INTEGER
TCP SYN and SYNACK messages usually advertise an ADVMSS option,
as described in RFC 1122 and RFC 6691.
If this ADVMSS option is smaller than tcp_min_snd_mss,
it is silently capped to tcp_min_snd_mss.

Default : 48 (at least 8 bytes of payload per segment)

tcp_congestion_control - STRING
Set the congestion control algorithm to be used for new
connections. The algorithm "reno" is always available, but
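
Once this kernel is running, the documented knob appears as net.ipv4.tcp_min_snd_mss, i.e. /proc/sys/net/ipv4/tcp_min_snd_mss. A minimal userspace sketch for checking the current value (illustrative only, not part of the commit):

/* check_min_snd_mss.c - read the new sysctl via procfs (illustrative) */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_min_snd_mss", "r");
	int val;

	if (!f) {
		perror("tcp_min_snd_mss (kernel too old?)");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("net.ipv4.tcp_min_snd_mss = %d\n", val);
	fclose(f);
	return 0;
}
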
2 changes: 1 addition & 1 deletion Makefile
@@ -5,7 +5,7 @@ EXTRAVERSION =
NAME = Unicycling Gorilla
RHEL_MAJOR = 7
RHEL_MINOR = 6
RHEL_RELEASE = 957.21.2
RHEL_RELEASE = 957.21.3

#
# DRM backport version
2 changes: 2 additions & 0 deletions include/linux/tcp.h
@@ -413,4 +413,6 @@ static inline int fastopen_init_queue(struct sock *sk, int backlog)
return 0;
}

int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
int shiftlen);
#endif /* _LINUX_TCP_H */
1 change: 1 addition & 0 deletions include/net/net_namespace.h
@@ -193,6 +193,7 @@ struct net {
/* upstream has this as part of netns_ipv4 */
RH_KABI_EXTEND(struct fib_notifier_ops *ipv4_ipmr_notifier_ops)
RH_KABI_EXTEND(unsigned int ipv4_ipmr_seq) /* protected by rtnl_mutex */
RH_KABI_EXTEND(int ipv4_sysctl_tcp_min_snd_mss)
};

/*
2 changes: 2 additions & 0 deletions include/net/tcp.h
@@ -54,6 +54,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS 48
#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/*
* Never offer a window over 32767 without using window scaling. Some
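
The arithmetic behind the two new constants: with MAX_TCP_OPTION_SPACE at 40 bytes, a 48-byte minimum send MSS leaves at least 8 bytes of payload per segment, which is exactly what TCP_MIN_GSO_SIZE evaluates to. A standalone sketch that mirrors the BUILD_BUG_ON added in tcp_init() below (constants copied here for illustration, not the kernel headers themselves):

/* min_mss_math.c - sanity-check the new constants (illustrative) */
#define MAX_TCP_OPTION_SPACE	40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/* The minimum MSS must leave room for a full option block plus payload. */
_Static_assert(TCP_MIN_SND_MSS > MAX_TCP_OPTION_SPACE,
	       "minimum MSS must exceed maximum TCP option space");
_Static_assert(TCP_MIN_GSO_SIZE == 8,
	       "at least 8 bytes of payload per segment");

int main(void) { return 0; }
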
2 changes: 2 additions & 0 deletions include/uapi/linux/snmp.h
@@ -281,6 +281,8 @@ enum
LINUX_MIB_TCPACKSKIPPEDFINWAIT2, /* TCPACKSkippedFinWait2 */
LINUX_MIB_TCPACKSKIPPEDTIMEWAIT, /* TCPACKSkippedTimeWait */
LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */
LINUX_MIB_PFMEMALLOCDROP,
LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */
#endif
__LINUX_MIB_MAX
};
1 change: 1 addition & 0 deletions net/ipv4/proc.c
@@ -297,6 +297,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPACKSkippedFinWait2", LINUX_MIB_TCPACKSKIPPEDFINWAIT2),
SNMP_MIB_ITEM("TCPACKSkippedTimeWait", LINUX_MIB_TCPACKSKIPPEDTIMEWAIT),
SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG),
SNMP_MIB_SENTINEL
};

11 changes: 11 additions & 0 deletions net/ipv4/sysctl_net_ipv4.c
@@ -36,6 +36,8 @@ static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
static int tcp_adv_win_scale_min = -31;
static int tcp_adv_win_scale_max = 31;
static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
static int tcp_min_snd_mss_max = 65535;
static int ip_ttl_min = 1;
static int ip_ttl_max = 255;
static int tcp_syn_retries_min = 1;
@@ -953,6 +955,15 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "tcp_min_snd_mss",
.data = &init_net.ipv4_sysctl_tcp_min_snd_mss,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &tcp_min_snd_mss_min,
.extra2 = &tcp_min_snd_mss_max,
},
{ }
};

1 change: 1 addition & 0 deletions net/ipv4/tcp.c
@@ -3102,6 +3102,7 @@ void __init tcp_init(void)
int max_rshare, max_wshare, cnt;
unsigned int i;

BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));

percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
31 changes: 23 additions & 8 deletions net/ipv4/tcp_input.c
@@ -1300,13 +1300,13 @@ static u8 tcp_sacktag_one(struct sock *sk,
/* Shift newly-SACKed bytes from this skb to the immediately previous
* already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
*/
static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
struct sk_buff *skb,
struct tcp_sacktag_state *state,
unsigned int pcount, int shifted, int mss,
bool dup_sack)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */
u32 end_seq = start_seq + shifted; /* end of newly-SACKed */

@@ -1329,7 +1329,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
TCP_SKB_CB(skb)->seq += shifted;

skb_shinfo(prev)->gso_segs += pcount;
BUG_ON(skb_shinfo(skb)->gso_segs < pcount);
WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
skb_shinfo(skb)->gso_segs -= pcount;

/* When we're adding to gso_segs == 1, gso_size will be zero,
@@ -1395,6 +1395,21 @@ static int skb_can_shift(const struct sk_buff *skb)
return !skb_headlen(skb) && skb_is_nonlinear(skb);
}

int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
int pcount, int shiftlen)
{
/* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
* Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
* to make sure not storing more than 65535 * 8 bytes per skb,
* even if current MSS is bigger.
*/
if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
return 0;
if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
return 0;
return skb_shift(to, from, shiftlen);
}

/* Try collapsing SACK blocks spanning across multiple skbs to a single
* skb.
*/
@@ -1500,9 +1515,9 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
goto fallback;

if (!skb_shift(prev, skb, len))
if (!tcp_skb_shift(prev, skb, pcount, len))
goto fallback;
if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
goto out;

/* Hole filled allows collapsing with the next as well, this is very
@@ -1519,11 +1534,11 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
goto out;

len = skb->len;
if (skb_shift(prev, skb, len)) {
if (tcp_skb_shift(prev, skb, tcp_skb_pcount(skb), len)) {
pcount += tcp_skb_pcount(skb);
tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb),
len, mss, 0);
}

out:
state->fack_count += pcount;
return prev;
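
The guard added in tcp_skb_shift() matters because the skb's segment count (gso_segs) is a 16-bit field: coalescing very many minimum-sized SACKed segments into one skb could push that count past 65535; the same commit also softens the related BUG_ON() in tcp_shifted_skb() to a WARN_ON_ONCE(). A standalone sketch of the same limit, with the kernel types and helpers replaced by plain C stand-ins (illustrative only):

/* shift_limit.c - illustrate the 16-bit segment-count limit (not kernel code) */
#include <stdio.h>
#include <stdint.h>

#define TCP_MIN_GSO_SIZE 8	/* 48-byte minimum MSS minus 40 bytes of options */

/* Stand-in for the new check: refuse a shift that could leave the merged
 * skb with more data than 65535 minimum-sized segments can describe, or
 * with a segment count that no longer fits in 16 bits.
 */
static int can_shift(uint32_t to_len, uint32_t shiftlen,
		     uint32_t to_pcount, uint32_t pcount)
{
	if (to_len + shiftlen >= 65535u * TCP_MIN_GSO_SIZE)
		return 0;
	if (to_pcount + pcount > 65535u)
		return 0;
	return 1;
}

int main(void)
{
	printf("byte limit = %u\n", 65535u * TCP_MIN_GSO_SIZE);	/* 524280 */
	printf("small shift allowed: %d\n", can_shift(1000, 500, 2, 1));
	printf("huge shift rejected: %d\n", can_shift(524000, 1000, 60000, 5000));
	return 0;
}
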
1 change: 1 addition & 0 deletions net/ipv4/tcp_ipv4.c
@@ -2519,6 +2519,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_ecn = 2;

net->ipv4_sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
net->ipv4_sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
net->ipv4_sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
net->ipv4_sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

9 changes: 7 additions & 2 deletions net/ipv4/tcp_output.c
@@ -1128,6 +1128,12 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
if (nsize < 0)
nsize = 0;

if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf) ||
skb_queue_len(&sk->sk_write_queue) > 2048) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
return -ENOMEM;
}

if (skb_unclone(skb, GFP_ATOMIC))
return -ENOMEM;

@@ -1293,8 +1299,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
mss_now -= icsk->icsk_ext_hdr_len;

/* Then reserve room for full set of TCP options and 8 bytes of data */
if (mss_now < 48)
mss_now = 48;
mss_now = max(mss_now, sock_net(sk)->ipv4_sysctl_tcp_min_snd_mss);
return mss_now;
}

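
Two hardenings land in tcp_output.c: tcp_fragment() now refuses to split a segment once the write queue is already far beyond its budget (more than twice sk_sndbuf queued in bytes, or more than 2048 skbs), bumping the new TCPWqueueTooBig counter instead, and __tcp_mtu_to_mss() clamps to the per-netns sysctl rather than the hard-coded 48. A rough standalone sketch of the queue check, with invented struct and field names standing in for the socket fields (not kernel code):

/* wqueue_check.c - shape of the tcp_fragment() sanity check (illustrative) */
#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
	unsigned long wmem_queued;	/* bytes queued for transmit */
	unsigned long sndbuf;		/* configured send buffer limit */
	unsigned long write_queue_len;	/* skbs on the write queue */
};

static bool wqueue_too_big(const struct fake_sock *sk)
{
	/* Refuse once more than twice the send buffer is queued in bytes,
	 * or the queue already holds more than 2048 skbs.
	 */
	return (sk->wmem_queued >> 1) > sk->sndbuf ||
	       sk->write_queue_len > 2048;
}

int main(void)
{
	struct fake_sock sane = { 200000, 212992, 100 };
	struct fake_sock bloated = { 4000000, 212992, 3000 };

	printf("sane:    %d\n", wqueue_too_big(&sane));		/* 0 */
	printf("bloated: %d\n", wqueue_too_big(&bloated));	/* 1 */
	return 0;
}
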
2 changes: 2 additions & 0 deletions net/ipv4/tcp_timer.c
@@ -104,12 +104,14 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
icsk->icsk_mtup.enabled = 1;
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
} else {
const struct net *net = sock_net(sk);
struct tcp_sock *tp = tcp_sk(sk);
int mss;

mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
mss = min(sysctl_tcp_base_mss, mss);
mss = max(mss, 68 - tp->tcp_header_len);
mss = max(mss, net->ipv4_sysctl_tcp_min_snd_mss);
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
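
With the extra clamp in the MTU-probing fallback, a collapsing search_low can no longer drag the probe MSS below the per-netns minimum. A worked example with invented numbers (sketch only; tcp_mtu_to_mss()/tcp_mss_to_mtu() are not reproduced here):

/* probe_floor.c - worked example of the new clamping order (illustrative) */
#include <stdio.h>

int main(void)
{
	int base_mss = 1024;	/* sysctl_tcp_base_mss */
	int probed_mss = 40;	/* tcp_mtu_to_mss(search_low) >> 1, deliberately tiny */
	int header_len = 40;	/* tp->tcp_header_len */
	int min_snd_mss = 48;	/* net->ipv4_sysctl_tcp_min_snd_mss */
	int mss = probed_mss;

	if (mss > base_mss)
		mss = base_mss;		/* min(sysctl_tcp_base_mss, mss) */
	if (mss < 68 - header_len)
		mss = 68 - header_len;	/* old floor: 28 with these numbers */
	if (mss < min_snd_mss)
		mss = min_snd_mss;	/* new floor from the sysctl */

	printf("probe mss = %d\n", mss);	/* 48, not 40 */
	return 0;
}
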
