net/socket: factor out helpers for memory and queue manipulation
authorPaolo Abeni <pabeni@redhat.com>
Fri, 21 Oct 2016 11:55:45 +0000 (13:55 +0200)
committerDavid S. Miller <davem@davemloft.net>
Sat, 22 Oct 2016 21:05:05 +0000 (17:05 -0400)
Basic sock operations that udp code can use with its own
memory accounting scheme. No functional change is introduced
in the existing APIs.

v4 -> v5:
  - avoid whitespace changes

v2 -> v4:
  - avoid exporting __sock_enqueue_skb

v1 -> v2:
  - avoid exporting sock_rmem_free

Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/sock.h
net/core/datagram.c
net/core/sock.c

index ebf75db08e062dfe7867cc80c7699f593be16349..27648955333892551030f87887c5545018e530af 100644 (file)
@@ -1274,7 +1274,9 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
 /*
  * Functions for memory accounting
  */
+int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
 int __sk_mem_schedule(struct sock *sk, int size, int kind);
+void __sk_mem_reduce_allocated(struct sock *sk, int amount);
 void __sk_mem_reclaim(struct sock *sk, int amount);
 
 #define SK_MEM_QUANTUM ((int)PAGE_SIZE)
@@ -1950,6 +1952,8 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
 
 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
+int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
+                       unsigned int flags);
 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
index b7de71f8d5d3a5fa947fa7306fa6812e9f166da5..bfb973aebb5b16a8cd04eebdd712bcd6006e86d6 100644 (file)
@@ -323,6 +323,27 @@ void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
 }
 EXPORT_SYMBOL(__skb_free_datagram_locked);
 
+int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
+                       unsigned int flags)
+{
+       int err = 0;
+
+       if (flags & MSG_PEEK) {
+               err = -ENOENT;
+               spin_lock_bh(&sk->sk_receive_queue.lock);
+               if (skb == skb_peek(&sk->sk_receive_queue)) {
+                       __skb_unlink(skb, &sk->sk_receive_queue);
+                       atomic_dec(&skb->users);
+                       err = 0;
+               }
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
+       }
+
+       atomic_inc(&sk->sk_drops);
+       return err;
+}
+EXPORT_SYMBOL(__sk_queue_drop_skb);
+
 /**
  *     skb_kill_datagram - Free a datagram skbuff forcibly
  *     @sk: socket
@@ -346,23 +367,10 @@ EXPORT_SYMBOL(__skb_free_datagram_locked);
 
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 {
-       int err = 0;
-
-       if (flags & MSG_PEEK) {
-               err = -ENOENT;
-               spin_lock_bh(&sk->sk_receive_queue.lock);
-               if (skb == skb_peek(&sk->sk_receive_queue)) {
-                       __skb_unlink(skb, &sk->sk_receive_queue);
-                       atomic_dec(&skb->users);
-                       err = 0;
-               }
-               spin_unlock_bh(&sk->sk_receive_queue.lock);
-       }
+       int err = __sk_queue_drop_skb(sk, skb, flags);
 
        kfree_skb(skb);
-       atomic_inc(&sk->sk_drops);
        sk_mem_reclaim_partial(sk);
-
        return err;
 }
 EXPORT_SYMBOL(skb_kill_datagram);
index c73e28fc9c2a45225af39f7905072456d4380c7f..d8e4532e89e7c28737c95c723e5f5b3d184a7805 100644 (file)
@@ -2091,24 +2091,18 @@ int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
 EXPORT_SYMBOL(sk_wait_data);
 
 /**
- *     __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
+ *     __sk_mem_raise_allocated - increase memory_allocated
  *     @sk: socket
  *     @size: memory size to allocate
+ *     @amt: pages to allocate
  *     @kind: allocation type
  *
- *     If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
- *     rmem allocation. This function assumes that protocols which have
- *     memory_pressure use sk_wmem_queued as write buffer accounting.
+ *     Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
  */
-int __sk_mem_schedule(struct sock *sk, int size, int kind)
+int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 {
        struct proto *prot = sk->sk_prot;
-       int amt = sk_mem_pages(size);
-       long allocated;
-
-       sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-
-       allocated = sk_memory_allocated_add(sk, amt);
+       long allocated = sk_memory_allocated_add(sk, amt);
 
        if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
            !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
@@ -2169,9 +2163,6 @@ suppress_allocation:
 
        trace_sock_exceed_buf_limit(sk, prot, allocated);
 
-       /* Alas. Undo changes. */
-       sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-
        sk_memory_allocated_sub(sk, amt);
 
        if (mem_cgroup_sockets_enabled && sk->sk_memcg)
@@ -2179,18 +2170,40 @@ suppress_allocation:
 
        return 0;
 }
+EXPORT_SYMBOL(__sk_mem_raise_allocated);
+
+/**
+ *     __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
+ *     @sk: socket
+ *     @size: memory size to allocate
+ *     @kind: allocation type
+ *
+ *     If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
+ *     rmem allocation. This function assumes that protocols which have
+ *     memory_pressure use sk_wmem_queued as write buffer accounting.
+ */
+int __sk_mem_schedule(struct sock *sk, int size, int kind)
+{
+       int ret, amt = sk_mem_pages(size);
+
+       sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
+       ret = __sk_mem_raise_allocated(sk, size, amt, kind);
+       if (!ret)
+               sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
+       return ret;
+}
 EXPORT_SYMBOL(__sk_mem_schedule);
 
 /**
- *     __sk_mem_reclaim - reclaim memory_allocated
+ *     __sk_mem_reduce_allocated - reclaim memory_allocated
  *     @sk: socket
- *     @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
+ *     @amount: number of quanta
+ *
+ *     Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
  */
-void __sk_mem_reclaim(struct sock *sk, int amount)
+void __sk_mem_reduce_allocated(struct sock *sk, int amount)
 {
-       amount >>= SK_MEM_QUANTUM_SHIFT;
        sk_memory_allocated_sub(sk, amount);
-       sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
 
        if (mem_cgroup_sockets_enabled && sk->sk_memcg)
                mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
@@ -2199,6 +2212,19 @@ void __sk_mem_reclaim(struct sock *sk, int amount)
            (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
                sk_leave_memory_pressure(sk);
 }
+EXPORT_SYMBOL(__sk_mem_reduce_allocated);
+
+/**
+ *     __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
+ *     @sk: socket
+ *     @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
+ */
+void __sk_mem_reclaim(struct sock *sk, int amount)
+{
+       amount >>= SK_MEM_QUANTUM_SHIFT;
+       sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
+       __sk_mem_reduce_allocated(sk, amount);
+}
 EXPORT_SYMBOL(__sk_mem_reclaim);
 
 int sk_set_peek_off(struct sock *sk, int val)