subsys: net: ip: tcp: Fix kernel crash #68

Merged
31 changes: 30 additions & 1 deletion subsys/net/ip/tcp.c
@@ -450,7 +450,20 @@ static int tcp_conn_unref(struct tcp *conn)

tcp_send_queue_flush(conn);

k_work_cancel_delayable(&conn->send_data_timer);
/* Cancel all delayable work and prevent any newly scheduled work from
 * running on one of the work queues. This is essential because the
 * container holding the work items is destroyed at the end of this
 * function, while a handler that is currently executing might still
 * access the connection and schedule new work.
 * Solution: cancel all delayable work while holding the connection lock.
 * Every delayable work handler takes this same lock for the duration of
 * its execution, so at this point we are either alone here or one of the
 * handlers is running. Once the cancellations below have been issued
 * under the lock, every handler has either finished or been cancelled
 * and will not execute anymore.
 */
k_mutex_lock(&conn->lock, K_FOREVER);

(void)k_work_cancel_delayable(&conn->send_data_timer);
tcp_pkt_unref(conn->send_data);

if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
@@ -461,6 +474,10 @@ static int tcp_conn_unref(struct tcp *conn)
(void)k_work_cancel_delayable(&conn->fin_timer);
(void)k_work_cancel_delayable(&conn->persist_timer);
(void)k_work_cancel_delayable(&conn->ack_timer);
(void)k_work_cancel_delayable(&conn->send_timer);
(void)k_work_cancel_delayable(&conn->recv_queue_timer);

k_mutex_unlock(&conn->lock);

sys_slist_find_and_remove(&tcp_conns, &conn->next);
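
For context, here is a minimal sketch of the teardown pattern introduced above, using hypothetical names (struct fake_conn, fake_timer_handler(), fake_conn_destroy()); only the k_mutex_* and k_work_* calls are real Zephyr kernel APIs, and this illustrates the idea rather than the actual tcp.c code:

#include <zephyr/kernel.h>

struct fake_conn {
	struct k_mutex lock;
	struct k_work_delayable timer;
	/* ... other per-connection state ... */
};

static void fake_timer_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct fake_conn *conn = CONTAINER_OF(dwork, struct fake_conn, timer);

	/* Every handler takes the connection lock for its whole run, so
	 * teardown can synchronize with it by holding the same lock.
	 */
	k_mutex_lock(&conn->lock, K_FOREVER);

	/* ... access conn and possibly re-arm the timer, still locked ... */
	(void)k_work_reschedule(&conn->timer, K_MSEC(100));

	k_mutex_unlock(&conn->lock);
}

static void fake_conn_destroy(struct fake_conn *conn)
{
	/* A handler that is mid-execution holds the same lock, so the
	 * k_mutex_lock() below waits for it to finish; anything it re-armed
	 * is then cancelled here, and work that never started running is
	 * cancelled before it gets the chance.
	 */
	k_mutex_lock(&conn->lock, K_FOREVER);
	(void)k_work_cancel_delayable(&conn->timer);
	k_mutex_unlock(&conn->lock);

	/* Only now is it safe to release the memory holding the work item. */
	k_free(conn);
}
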

@@ -566,10 +583,12 @@ static void tcp_send_process(struct k_work *work)
struct tcp *conn = CONTAINER_OF(dwork, struct tcp, send_timer);
bool unref;

/* Take the lock to prevent a race condition with tcp_conn_unref() */
k_mutex_lock(&conn->lock, K_FOREVER);

unref = tcp_send_process_no_lock(conn);

/* Release the lock only after any new work has been scheduled */
k_mutex_unlock(&conn->lock);

if (unref) {
@@ -1262,6 +1281,7 @@ static void tcp_cleanup_recv_queue(struct k_work *work)
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct tcp *conn = CONTAINER_OF(dwork, struct tcp, recv_queue_timer);

/* Take the lock to prevent a race condition with tcp_conn_unref() */
k_mutex_lock(&conn->lock, K_FOREVER);

NET_DBG("Cleanup recv queue conn %p len %zd seq %u", conn,
@@ -1271,6 +1291,7 @@ static void tcp_cleanup_recv_queue(struct k_work *work)
net_buf_unref(conn->queue_recv_data->buffer);
conn->queue_recv_data->buffer = NULL;

/* Release the lock only after any new work has been scheduled */
k_mutex_unlock(&conn->lock);
}

@@ -1282,6 +1303,7 @@ static void tcp_resend_data(struct k_work *work)
int ret;
int exp_tcp_rto;

/* Take the lock to prevent a race condition with tcp_conn_unref() */
k_mutex_lock(&conn->lock, K_FOREVER);

NET_DBG("send_data_retries=%hu", conn->send_data_retries);
@@ -1337,6 +1359,7 @@ static void tcp_resend_data(struct k_work *work)
K_MSEC(exp_tcp_rto));

out:
/* Release the lock only after any new work has been scheduled */
k_mutex_unlock(&conn->lock);

if (conn_unref) {
@@ -1349,6 +1372,7 @@ static void tcp_timewait_timeout(struct k_work *work)
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct tcp *conn = CONTAINER_OF(dwork, struct tcp, timewait_timer);

/* No need to acquire conn->lock, as this handler does not schedule any work */
NET_DBG("conn: %p %s", conn, tcp_conn_state(conn, NULL));

(void)tcp_conn_close(conn, -ETIMEDOUT);
@@ -1367,6 +1391,7 @@ static void tcp_fin_timeout(struct k_work *work)
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct tcp *conn = CONTAINER_OF(dwork, struct tcp, fin_timer);

/* No need to acquire conn->lock, as this handler does not schedule any work */
if (conn->state == TCP_SYN_RECEIVED) {
tcp_establish_timeout(conn);
return;
@@ -1383,6 +1408,7 @@ static void tcp_send_zwp(struct k_work *work)
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct tcp *conn = CONTAINER_OF(dwork, struct tcp, persist_timer);

/* Take the lock to prevent a race condition with tcp_conn_unref() */
k_mutex_lock(&conn->lock, K_FOREVER);

(void)tcp_out_ext(conn, ACK, NULL, conn->seq - 1);
@@ -1406,6 +1432,7 @@ static void tcp_send_zwp(struct k_work *work)
&tcp_work_q, &conn->persist_timer, K_MSEC(timeout));
}

/* Release the lock only after any new work has been scheduled */
k_mutex_unlock(&conn->lock);
}
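
The repeated "release the lock only after any new work has been scheduled" comments encode an ordering requirement. A small sketch of why the ordering matters, reusing the hypothetical struct fake_conn from the sketch above (bad_handler and good_handler are likewise made-up names):

/* Reuses the hypothetical struct fake_conn defined in the earlier sketch. */

static void bad_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct fake_conn *conn = CONTAINER_OF(dwork, struct fake_conn, timer);

	k_mutex_lock(&conn->lock, K_FOREVER);
	/* ... use conn ... */
	k_mutex_unlock(&conn->lock);

	/* RACY: teardown may run between the unlock above and this call,
	 * cancel everything under its own lock and free the connection;
	 * this reschedule then touches freed memory and re-arms a timer
	 * that nobody will ever cancel.
	 */
	(void)k_work_reschedule(&conn->timer, K_MSEC(100));
}

static void good_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct fake_conn *conn = CONTAINER_OF(dwork, struct fake_conn, timer);

	k_mutex_lock(&conn->lock, K_FOREVER);
	/* ... use conn ... */

	/* Re-arm while still holding the lock: if teardown runs afterwards,
	 * it cancels this newly scheduled work before freeing the connection.
	 */
	(void)k_work_reschedule(&conn->timer, K_MSEC(100));

	k_mutex_unlock(&conn->lock);
}
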

@@ -1414,10 +1441,12 @@ static void tcp_send_ack(struct k_work *work)
struct k_work_delayable *dwork = k_work_delayable_from_work(work);
struct tcp *conn = CONTAINER_OF(dwork, struct tcp, ack_timer);

/* Take the lock to prevent a race condition with tcp_conn_unref() */
k_mutex_lock(&conn->lock, K_FOREVER);

tcp_out(conn, ACK);

/* Release the lock only after any new work has been scheduled */
k_mutex_unlock(&conn->lock);
}
