
net: allow per netns sysctl_rmem and sysctl_wmem for protos

As we want to gradually implement per netns sysctl_rmem and sysctl_wmem
on a per protocol basis, add two new fields in struct proto,
and two new helpers: sk_get_wmem0() and sk_get_rmem0().

The first user will be TCP. Then UDP and SCTP can be easily converted,
while DECNET probably won't get this support.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
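
As a rough sketch of how the planned TCP conversion could use the new fields, a proto would record where its per netns sysctls live inside struct net. The ipv4.sysctl_tcp_wmem / ipv4.sysctl_tcp_rmem names below are assumptions for illustration, not part of this patch:

/* Hypothetical follow-up, not in this commit: a protocol opts in by
 * recording the byte offset of its per netns sysctls inside struct net.
 * The field names are assumed here for illustration only.
 */
struct proto tcp_prot = {
	.name			= "TCP",
	/* ... */
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	/* ... */
};

A proto that leaves both offsets at zero keeps using the global sysctl_wmem/sysctl_rmem pointers, so existing protocols are unaffected.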
Eric Dumazet, 7 years ago
commit a3dcaf17ee

include/net/sock.h  (+22, -0)

@@ -1101,8 +1101,12 @@ struct proto {
	 */
	unsigned long		*memory_pressure;
	long			*sysctl_mem;
+
	int			*sysctl_wmem;
	int			*sysctl_rmem;
+	u32			sysctl_wmem_offset;
+	u32			sysctl_rmem_offset;
+
	int			max_header;
	bool			no_autobind;

@@ -2390,4 +2394,22 @@ extern int sysctl_optmem_max;
 extern __u32 sysctl_wmem_default;
 extern __u32 sysctl_rmem_default;

+static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
+{
+	/* Does this proto have per netns sysctl_wmem ? */
+	if (proto->sysctl_wmem_offset)
+		return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
+
+	return *proto->sysctl_wmem;
+}
+
+static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
+{
+	/* Does this proto have per netns sysctl_rmem ? */
+	if (proto->sysctl_rmem_offset)
+		return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
+
+	return *proto->sysctl_rmem;
+}
+
 #endif	/* _SOCK_H */
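
The helpers resolve the value through a byte offset into the socket's struct net when a proto has opted in, and fall back to the shared global sysctl otherwise. A standalone userspace illustration of that lookup scheme (fake_net, fake_proto and fake_get_wmem0 are invented names for this sketch, not kernel API) might look like:

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for struct net and struct proto; not kernel code. */
struct fake_net {
	int ns_id;		/* keeps the sysctl field off offset 0 */
	int sysctl_tcp_wmem0;	/* stands in for a per netns sysctl */
};

struct fake_proto {
	int	*sysctl_wmem;		/* global sysctl, used when offset is 0 */
	size_t	sysctl_wmem_offset;	/* offset into struct fake_net, or 0 */
};

/* Mirrors the shape of sk_get_wmem0(): offset wins, else global pointer. */
static int fake_get_wmem0(const struct fake_net *net,
			  const struct fake_proto *proto)
{
	if (proto->sysctl_wmem_offset)
		return *(const int *)((const char *)net +
				      proto->sysctl_wmem_offset);
	return *proto->sysctl_wmem;
}

static int global_wmem[3] = { 4096, 16384, 4194304 };

int main(void)
{
	struct fake_net net = { .ns_id = 1, .sysctl_tcp_wmem0 = 8192 };
	struct fake_proto legacy = { .sysctl_wmem = global_wmem };
	struct fake_proto per_ns = {
		.sysctl_wmem		= global_wmem,
		.sysctl_wmem_offset	= offsetof(struct fake_net,
						   sysctl_tcp_wmem0),
	};

	printf("legacy proto wmem[0]:  %d\n", fake_get_wmem0(&net, &legacy));
	printf("per netns proto wmem0: %d\n", fake_get_wmem0(&net, &per_ns));
	return 0;
}

Running this prints 4096 for the legacy proto and 8192 for the per netns one, mirroring how sk_get_wmem0() treats a zero offset as "no per netns sysctl".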

include/trace/events/sock.h  (+1, -1)

@@ -48,7 +48,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
		strncpy(__entry->name, prot->name, 32);
		__entry->sysctl_mem = prot->sysctl_mem;
		__entry->allocated = allocated;
-		__entry->sysctl_rmem = prot->sysctl_rmem[0];
+		__entry->sysctl_rmem = sk_get_rmem0(sk, prot);
		__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
	),


net/core/sock.c  (+6, -4)

@@ -2346,16 +2346,18 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
-		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
+		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
			return 1;

	} else { /* SK_MEM_SEND */
+		int wmem0 = sk_get_wmem0(sk, prot);
+
		if (sk->sk_type == SOCK_STREAM) {
-			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
+			if (sk->sk_wmem_queued < wmem0)
				return 1;
-		} else if (refcount_read(&sk->sk_wmem_alloc) <
-			   prot->sysctl_wmem[0])
+		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
				return 1;
+		}
	}

	if (sk_has_memory_pressure(sk)) {