tls: rx: consistently use unlocked accessors for rx_list
rx_list is protected by the socket lock; there is no need to take
the queue's built-in spinlock on accesses.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: f45ba67eb7
Commit: a30295c454
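For context: the locked skb-queue helpers are thin wrappers that take the
queue's built-in spinlock around their double-underscore counterparts. A
minimal sketch of the pattern, modeled on skb_unlink() in
net/core/skbuff.c (not part of this patch):

	void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
	{
		unsigned long flags;

		/* the built-in queue lock this patch avoids */
		spin_lock_irqsave(&list->lock, flags);
		__skb_unlink(skb, list);	/* unlocked list manipulation */
		spin_unlock_irqrestore(&list->lock, flags);
	}

Because every rx_list access below already runs under the socket lock, the
spin_lock_irqsave/irqrestore pair adds no protection, so each call site is
switched to the unlocked variant.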
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1709,7 +1709,7 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
 		next_skb = skb_peek_next(skb, &ctx->rx_list);
 
 		if (!is_peek) {
-			skb_unlink(skb, &ctx->rx_list);
+			__skb_unlink(skb, &ctx->rx_list);
 			consume_skb(skb);
 		}
 
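process_rx_list() is only reached from the recvmsg path with the socket
lock held. To make that assumption self-checking, a hypothetical lockdep
assertion (not in this patch, and assuming a struct sock *sk is in scope)
could precede the unlocked access:

	/* hypothetical debug check: the unlocked accessor relies on the
	 * caller holding the socket lock
	 */
	WARN_ON_ONCE(!lockdep_sock_is_held(sk));
	__skb_unlink(skb, &ctx->rx_list);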
@@ -1824,7 +1824,7 @@ int tls_sw_recvmsg(struct sock *sk,
 
 		ctx->recv_pkt = NULL;
 		__strp_unpause(&ctx->strp);
-		skb_queue_tail(&ctx->rx_list, skb);
+		__skb_queue_tail(&ctx->rx_list, skb);
 
 		if (async) {
 			/* TLS 1.2-only, to_decrypt must be text length */
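The serialization argument for this hunk: tls_sw_recvmsg() brackets all of
its work, including every rx_list access, with the socket lock. A condensed
sketch of that shape, with arguments and error handling elided:

	int tls_sw_recvmsg(struct sock *sk /* , msghdr, len, flags, ... */)
	{
		/* ... */
		lock_sock(sk);		/* serializes all rx_list users */
		/* ... */
		__skb_queue_tail(&ctx->rx_list, skb);	/* socket lock held */
		/* ... */
		release_sock(sk);
		return copied;
	}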
@@ -1845,7 +1845,7 @@ leave_on_list:
 			if (err != __SK_PASS) {
 				rxm->offset = rxm->offset + rxm->full_len;
 				rxm->full_len = 0;
-				skb_unlink(skb, &ctx->rx_list);
+				__skb_unlink(skb, &ctx->rx_list);
 				if (err == __SK_DROP)
 					consume_skb(skb);
 				continue;
@@ -1873,7 +1873,7 @@ leave_on_list:
 			decrypted += chunk;
 			len -= chunk;
 
-			skb_unlink(skb, &ctx->rx_list);
+			__skb_unlink(skb, &ctx->rx_list);
 			consume_skb(skb);
 
 			/* Return full control message to userspace before trying
@@ -2173,7 +2173,7 @@ void tls_sw_release_resources_rx(struct sock *sk)
 	if (ctx->aead_recv) {
 		kfree_skb(ctx->recv_pkt);
 		ctx->recv_pkt = NULL;
-		skb_queue_purge(&ctx->rx_list);
+		__skb_queue_purge(&ctx->rx_list);
 		crypto_free_aead(ctx->aead_recv);
 		strp_stop(&ctx->strp);
 		/* If tls_sw_strparser_arm() was not called (cleanup paths)
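The release path relies on the same rule: per the commit message, rx_list
is protected by the socket lock here as well, so purging the queue needs no
extra locking. The unlocked purge is just a dequeue-and-free loop; a sketch
modeled on __skb_queue_purge() in include/linux/skbuff.h:

	static inline void __skb_queue_purge(struct sk_buff_head *list)
	{
		struct sk_buff *skb;

		/* drain the queue without touching list->lock */
		while ((skb = __skb_dequeue(list)) != NULL)
			kfree_skb(skb);
	}

skb_queue_purge() runs the same loop but takes list->lock around each
dequeue, which is wasted work when the caller already holds the only lock
that matters.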