|
@@ -1162,11 +1162,6 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
|
|
|
sk->sk_prot->enter_memory_pressure(sk);
|
|
|
}
|
|
|
|
|
|
-static inline long sk_prot_mem_limits(const struct sock *sk, int index)
|
|
|
-{
|
|
|
- return sk->sk_prot->sysctl_mem[index];
|
|
|
-}
|
|
|
-
|
|
|
static inline long
|
|
|
sk_memory_allocated(const struct sock *sk)
|
|
|
{
|
|
@@ -1281,11 +1276,27 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind);
|
|
|
void __sk_mem_reduce_allocated(struct sock *sk, int amount);
|
|
|
void __sk_mem_reclaim(struct sock *sk, int amount);
|
|
|
|
|
|
-#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
|


|


|


+/* We used to have PAGE_SIZE here, but systems with 64KB pages
|


|


|


+ * do not necessarily have 16x more memory than 4KB ones.
|


|


|


+ */
|


|


|


+#define SK_MEM_QUANTUM 4096
|


|


|


#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
|
|
|
#define SK_MEM_SEND 0
|
|
|
#define SK_MEM_RECV 1
|
|
|
|
|
|
+/* sysctl_mem values are in pages, we convert them to SK_MEM_QUANTUM units */
|


|


|


+static inline long sk_prot_mem_limits(const struct sock *sk, int index)
|


|


|


+{
|


|


|


+ long val = sk->sk_prot->sysctl_mem[index];
|


|


|


+
|


|


|


+#if PAGE_SIZE > SK_MEM_QUANTUM
|


|


|


+ val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
|


|


|


+#elif PAGE_SIZE < SK_MEM_QUANTUM
|


|


|


+ val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
|


|


|


+#endif
|


|


|


+ return val;
|


|


|


+}
|
|
|
+
|
|
|
static inline int sk_mem_pages(int amt)
|
|
|
{
|
|
|
return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
|