====== Understanding locks in networking ======
lock_sock and release_sock do not simply hold a spinlock for the duration of the critical section; instead they take ownership of the socket via the owner field and do some other housekeeping as well.
lock_sock grabs the spinlock sk->sk_lock.slock, disables local bottom halves and then checks whether the socket already has an owner. If it does, lock_sock sleeps until the owner releases the socket, then records itself as the owner and releases sk->sk_lock.slock. This means bh_lock_sock can still execute even while the socket is "locked", provided of course that the lock_sock call isn't executing at that very point in time.
release_sock grabs sk_lock.slock, processes any receive backlog that accumulated while the socket was owned, clears the owner, wakes up any waiters on sk_lock.wq, and then releases sk_lock.slock and re-enables bottom halves.
bh_lock_sock and bh_unlock_sock just grab and release sk->sk_lock.slock.
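To see how the two halves cooperate, here is a minimal sketch of the softirq-side pattern, modeled on the receive path of kernels from this era (cf. sk_receive_skb() and tcp_v4_rcv()); my_proto_rcv is a hypothetical protocol receive handler:
/*
 * Sketch only: my_proto_rcv() is hypothetical. It runs in softirq
 * context, so plain bh_lock_sock() (no _bh variant) is enough.
 */
static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
        int rc = 0;

        bh_lock_sock(sk);       /* take sk->sk_lock.slock only */
        if (!sock_owned_by_user(sk))
                rc = sk->sk_backlog_rcv(sk, skb);       /* no owner: process now */
        else
                sk_add_backlog(sk, skb);        /* owned: defer to __release_sock() */
        bh_unlock_sock(sk);

        return rc;
}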
Below are code samples to help illustrate the points.
===== lock_sock =====
in include/net/sock.h
extern void FASTCALL(lock_sock(struct sock *sk));
extern void FASTCALL(release_sock(struct sock *sk));
in net/core/sock.c
void fastcall lock_sock(struct sock *sk)
{
        might_sleep();
        spin_lock_bh(&(sk->sk_lock.slock));
        if (sk->sk_lock.owner)
                __lock_sock(sk);        /* sleep until the current owner lets go */
        sk->sk_lock.owner = (void *)1;  /* mark the socket as owned by a process */
        spin_unlock_bh(&(sk->sk_lock.slock));
}
EXPORT_SYMBOL(lock_sock);
void fastcall release_sock(struct sock *sk)
{
        spin_lock_bh(&(sk->sk_lock.slock));
        if (sk->sk_backlog.tail)
                __release_sock(sk);     /* drain packets queued while the socket was owned */
        sk->sk_lock.owner = NULL;
        if (waitqueue_active(&(sk->sk_lock.wq)))
                wake_up(&(sk->sk_lock.wq));     /* wake sleepers in __lock_sock() */
        spin_unlock_bh(&(sk->sk_lock.slock));
}
EXPORT_SYMBOL(release_sock);
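A minimal sketch of how a process-context path would use this pair; the function and the state it touches are hypothetical:
/*
 * Sketch only: a hypothetical setsockopt()-style handler running in
 * process context. It may sleep while owning the socket, because
 * lock_sock() drops the spinlock after taking ownership.
 */
static int my_proto_setsockopt(struct sock *sk, int val)
{
        lock_sock(sk);          /* may sleep in __lock_sock() */
        /*
         * Socket state can be modified safely here: softirqs that take
         * bh_lock_sock() see sock_owned_by_user() and queue packets to
         * the backlog instead of racing with us.
         */
        sk->sk_rcvlowat = val;  /* example field, any protocol state would do */
        release_sock(sk);       /* run the backlog, clear owner, wake waiters */
        return 0;
}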
The helpers they call, __lock_sock and __release_sock, are static functions in the same file:
static void __lock_sock(struct sock *sk)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
                                          TASK_UNINTERRUPTIBLE);
                spin_unlock_bh(&sk->sk_lock.slock);
                schedule();
                spin_lock_bh(&sk->sk_lock.slock);
                if (!sock_owned_by_user(sk))
                        break;
        }
        finish_wait(&sk->sk_lock.wq, &wait);
}
static void __release_sock(struct sock *sk)
{
        struct sk_buff *skb = sk->sk_backlog.head;

        do {
                sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
                bh_unlock_sock(sk);

                do {
                        struct sk_buff *next = skb->next;

                        skb->next = NULL;
                        sk->sk_backlog_rcv(sk, skb);

                        /*
                         * We are in process context here with softirqs
                         * disabled, use cond_resched_softirq() to preempt.
                         * This is safe to do because we've taken the backlog
                         * queue private:
                         */
                        cond_resched_softirq();

                        skb = next;
                } while (skb != NULL);

                bh_lock_sock(sk);
        } while ((skb = sk->sk_backlog.head) != NULL);
}
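The sock_owned_by_user() test used above is just a read of the owner field; in include/net/sock.h of kernels from this era it is defined as:
#define sock_owned_by_user(sk) ((sk)->sk_lock.owner)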
===== bh_lock_sock =====
in include/net/sock.h
#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
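Note that these use plain spin_lock()/spin_unlock() rather than the _bh variants: bh_lock_sock is meant to be called from bottom-half (softirq) context, as in the receive-path sketch above, where bottom halves are already disabled on the local CPU.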
===== general locking =====
in include/linux/spinlock.h:
#define spin_lock_bh(lock) _spin_lock_bh(lock)
in include/linux/spinlock_api_smp.h
void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t);
void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t);
in include/linux/spinlock_api_up.h:
#define _spin_lock_bh(lock) __LOCK_BH(lock)
#define _spin_lock(lock) __LOCK(lock)
#define __LOCK(lock) \
        do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
#define __LOCK_BH(lock) \
        do { local_bh_disable(); __LOCK(lock); } while (0)
#define __LOCK_IRQ(lock) \
        do { local_irq_disable(); __LOCK(lock); } while (0)
#define __LOCK_IRQSAVE(lock, flags) \
        do { local_irq_save(flags); __LOCK(lock); } while (0)
#define __UNLOCK(lock) \
        do { preempt_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_BH(lock) \
        do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
        do { local_irq_enable(); __UNLOCK(lock); } while (0)
#define __UNLOCK_IRQRESTORE(lock, flags) \
        do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
in include/linux/spinlock.h
#define spin_lock(lock) _spin_lock(lock)
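To tie it together, here is a minimal, hypothetical sketch of why lock_sock uses the _bh variant: whenever process context shares data with a bottom half on the same CPU, bottom halves must be disabled around the lock, or the softirq could interrupt the lock holder and deadlock trying to take the same lock. All names below are made up for illustration:
#include <linux/spinlock.h>

/* Hypothetical counter shared between process context and a softirq. */
static DEFINE_SPINLOCK(my_stats_lock);
static unsigned long my_rx_packets;

/* Process context: disable bottom halves while holding the lock. */
void my_stats_reset(void)
{
        spin_lock_bh(&my_stats_lock);
        my_rx_packets = 0;
        spin_unlock_bh(&my_stats_lock);
}

/* Softirq context: plain spin_lock() suffices, just as in
 * bh_lock_sock(), because bottom halves are already disabled. */
void my_stats_rx(void)
{
        spin_lock(&my_stats_lock);
        my_rx_packets++;
        spin_unlock(&my_stats_lock);
}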