net: ip: tcp: Fix kernel crash
Fix a kernel crash caused by the connection's memory being released
while a scheduler work item was still pending.

Signed-off-by: Jeroen van Dooren <jeroen.van.dooren@nobleo.nl>
JvanDooren committed Oct 6, 2023
1 parent b2bf05b commit c3a5960
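The crash being fixed is a use-after-free: before this patch, tcp_conn_unref() cancelled the connection's delayable work items without synchronizing with handlers that might already be running, and two of the timers (send_timer, recv_queue_timer) were not cancelled at all. Below is a minimal sketch of that failure mode, using hypothetical names (struct conn, conn_destroy, timer_handler) rather than the actual tcp.c code:

#include <zephyr/kernel.h>

struct conn {
	struct k_work_delayable timer;
};

static void timer_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct conn *conn = CONTAINER_OF(dwork, struct conn, timer);

	/* If conn_destroy() freed the connection while this handler was
	 * pending or running, conn is dangling here: use-after-free.
	 */
	k_work_reschedule(&conn->timer, K_SECONDS(1));
}

static void conn_destroy(struct conn *conn)
{
	/* Does not wait for a running handler, and a handler that is
	 * mid-execution can still reschedule itself after this call...
	 */
	(void)k_work_cancel_delayable(&conn->timer);
	k_free(conn);	/* ...so this free races with the handler. */
}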
Showing 1 changed file with 30 additions and 1 deletion: subsys/net/ip/tcp.c
@@ -450,7 +450,20 @@ static int tcp_conn_unref(struct tcp *conn)
 
 	tcp_send_queue_flush(conn);
 
-	k_work_cancel_delayable(&conn->send_data_timer);
+	/* Cancel all delayable work and prevent newly scheduled work from
+	 * executing on one of the work queues. This is essential because
+	 * the container holding the work items is destroyed at the bottom
+	 * of this function, and a handler that is currently running might
+	 * access the connection and schedule new work.
+	 * Solution: while holding the lock, cancel all delayable work.
+	 * Every delayable work handler takes and releases the same lock,
+	 * so we are either here or currently executing one of the handlers.
+	 * After cancelling the work items under the lock, each handler has
+	 * either finished or been cancelled and will not execute again.
+	 */
+	k_mutex_lock(&conn->lock, K_FOREVER);
+
+	(void)k_work_cancel_delayable(&conn->send_data_timer);
 	tcp_pkt_unref(conn->send_data);
 
 	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
@@ -461,6 +474,10 @@ static int tcp_conn_unref(struct tcp *conn)
 	(void)k_work_cancel_delayable(&conn->fin_timer);
 	(void)k_work_cancel_delayable(&conn->persist_timer);
 	(void)k_work_cancel_delayable(&conn->ack_timer);
+	(void)k_work_cancel_delayable(&conn->send_timer);
+	(void)k_work_cancel_delayable(&conn->recv_queue_timer);
+
+	k_mutex_unlock(&conn->lock);
 
 	sys_slist_find_and_remove(&tcp_conns, &conn->next);
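Condensed, the two hunks above give teardown the following shape. This is a sketch using the hypothetical struct conn from above, extended with a struct k_mutex lock member (the real struct tcp cancels several delayable items under one lock); it mirrors the commit's reasoning rather than quoting tcp.c:

static void conn_destroy(struct conn *conn)
{
	/* Cancel under the lock: every work handler takes this same lock,
	 * so after the cancellations each work item has either finished
	 * or been cancelled and cannot be rescheduled.
	 */
	k_mutex_lock(&conn->lock, K_FOREVER);
	(void)k_work_cancel_delayable(&conn->timer);
	k_mutex_unlock(&conn->lock);

	k_free(conn);	/* Freed only after all cancellations completed. */
}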

@@ -566,10 +583,12 @@ static void tcp_send_process(struct k_work *work)
 	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, send_timer);
 	bool unref;
 
+	/* Take the lock to prevent a race condition with tcp_conn_unref */
 	k_mutex_lock(&conn->lock, K_FOREVER);
 
 	unref = tcp_send_process_no_lock(conn);
 
+	/* Release the lock only after any possible scheduling of work */
 	k_mutex_unlock(&conn->lock);
 
 	if (unref) {
@@ -1262,6 +1281,7 @@ static void tcp_cleanup_recv_queue(struct k_work *work)
 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, recv_queue_timer);
 
+	/* Take the lock to prevent a race condition with tcp_conn_unref */
 	k_mutex_lock(&conn->lock, K_FOREVER);
 
 	NET_DBG("Cleanup recv queue conn %p len %zd seq %u", conn,
@@ -1271,6 +1291,7 @@ static void tcp_cleanup_recv_queue(struct k_work *work)
 		net_buf_unref(conn->queue_recv_data->buffer);
 		conn->queue_recv_data->buffer = NULL;
 
+	/* Release the lock only after any possible scheduling of work */
 	k_mutex_unlock(&conn->lock);
 }

@@ -1282,6 +1303,7 @@ static void tcp_resend_data(struct k_work *work)
 	int ret;
 	int exp_tcp_rto;
 
+	/* Take the lock to prevent a race condition with tcp_conn_unref */
 	k_mutex_lock(&conn->lock, K_FOREVER);
 
 	NET_DBG("send_data_retries=%hu", conn->send_data_retries);
@@ -1337,6 +1359,7 @@ static void tcp_resend_data(struct k_work *work)
 				    K_MSEC(exp_tcp_rto));
 
 out:
+	/* Release the lock only after any possible scheduling of work */
 	k_mutex_unlock(&conn->lock);
 
 	if (conn_unref) {
@@ -1349,6 +1372,7 @@ static void tcp_timewait_timeout(struct k_work *work)
 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, timewait_timer);
 
+	/* No need to acquire conn->lock: nothing is scheduled from here */
 	NET_DBG("conn: %p %s", conn, tcp_conn_state(conn, NULL));
 
 	(void)tcp_conn_close(conn, -ETIMEDOUT);
@@ -1367,6 +1391,7 @@ static void tcp_fin_timeout(struct k_work *work)
 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, fin_timer);
 
+	/* No need to acquire conn->lock: nothing is scheduled from here */
 	if (conn->state == TCP_SYN_RECEIVED) {
 		tcp_establish_timeout(conn);
 		return;
@@ -1383,6 +1408,7 @@ static void tcp_send_zwp(struct k_work *work)
 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, persist_timer);
 
+	/* Take the lock to prevent a race condition with tcp_conn_unref */
 	k_mutex_lock(&conn->lock, K_FOREVER);
 
 	(void)tcp_out_ext(conn, ACK, NULL, conn->seq - 1);
@@ -1406,6 +1432,7 @@ static void tcp_send_zwp(struct k_work *work)
 				&tcp_work_q, &conn->persist_timer, K_MSEC(timeout));
 	}
 
+	/* Release the lock only after any possible scheduling of work */
 	k_mutex_unlock(&conn->lock);
 }

@@ -1414,10 +1441,12 @@ static void tcp_send_ack(struct k_work *work)
 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, ack_timer);
 
+	/* Take the lock to prevent a race condition with tcp_conn_unref */
 	k_mutex_lock(&conn->lock, K_FOREVER);
 
 	tcp_out(conn, ACK);
 
+	/* Release the lock only after any possible scheduling of work */
 	k_mutex_unlock(&conn->lock);
 }

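On the handler side, each work handler that can touch the connection or schedule new work now brackets its body with conn->lock, releasing the lock only after any rescheduling, so no new work item can slip in between tcp_conn_unref()'s cancellations and the final release of the connection. A sketch of that bracket, again with the hypothetical names from above:

static void timer_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct conn *conn = CONTAINER_OF(dwork, struct conn, timer);

	/* Serialize against teardown (conn_destroy/tcp_conn_unref). */
	k_mutex_lock(&conn->lock, K_FOREVER);

	/* ... do the work, possibly rescheduling ... */
	k_work_reschedule(&conn->timer, K_SECONDS(1));

	/* Release only after any possible scheduling of work. */
	k_mutex_unlock(&conn->lock);
}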
