git.dujemihanovic.xyz Git - linux.git/commitdiff
af_unix: Move spin_lock() in manage_oob().
author: Kuniyuki Iwashima <kuniyu@amazon.com>
Thu, 5 Sep 2024 19:32:39 +0000 (12:32 -0700)
committer: Jakub Kicinski <kuba@kernel.org>
Tue, 10 Sep 2024 00:14:26 +0000 (17:14 -0700)
When the OOB skb has already been consumed, manage_oob() returns the next
skb if it exists.  In such a case, we need to fall back to the else branch
below.

Then, we want to keep holding spin_lock(&sk->sk_receive_queue.lock).

Let's move it out of the if-else branch and add a lightweight check before
spin_lock() for the major use case without an OOB skb.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20240905193240.17565-4-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/unix/af_unix.c

index 91d7877a107943e37216d21a69cfbbbbfd95517d..159d78fc3d14dc7a6dd0d1200d6c10df0f562695 100644 (file)
@@ -2657,9 +2657,12 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
        struct sk_buff *read_skb = NULL, *unread_skb = NULL;
        struct unix_sock *u = unix_sk(sk);
 
-       if (!unix_skb_len(skb)) {
-               spin_lock(&sk->sk_receive_queue.lock);
+       if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb)))
+               return skb;
 
+       spin_lock(&sk->sk_receive_queue.lock);
+
+       if (!unix_skb_len(skb)) {
                if (copied && (!u->oob_skb || skb == u->oob_skb)) {
                        skb = NULL;
                } else if (flags & MSG_PEEK) {
@@ -2670,14 +2673,9 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
                        __skb_unlink(read_skb, &sk->sk_receive_queue);
                }
 
-               spin_unlock(&sk->sk_receive_queue.lock);
-
-               consume_skb(read_skb);
-               return skb;
+               goto unlock;
        }
 
-       spin_lock(&sk->sk_receive_queue.lock);
-
        if (skb != u->oob_skb)
                goto unlock;
 
@@ -2698,6 +2696,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 unlock:
        spin_unlock(&sk->sk_receive_queue.lock);
 
+       consume_skb(read_skb);
        kfree_skb(unread_skb);
 
        return skb;