@@ -41,27 +41,27 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 		      unsigned int nr_table_entries)
 {
 	size_t lopt_size = sizeof(struct listen_sock);
-	struct listen_sock *lopt;
+	struct listen_sock *lopt = NULL;
 
 	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
 	nr_table_entries = max_t(u32, nr_table_entries, 8);
 	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
 	lopt_size += nr_table_entries * sizeof(struct request_sock *);
-	if (lopt_size > PAGE_SIZE)
+
+	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+		lopt = kzalloc(lopt_size, GFP_KERNEL |
+					  __GFP_NOWARN |
+					  __GFP_NORETRY);
+	if (!lopt)
 		lopt = vzalloc(lopt_size);
-	else
-		lopt = kzalloc(lopt_size, GFP_KERNEL);
-	if (lopt == NULL)
+	if (!lopt)
 		return -ENOMEM;
 
-	for (lopt->max_qlen_log = 3;
-	     (1 << lopt->max_qlen_log) < nr_table_entries;
-	     lopt->max_qlen_log++);
-
 	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
 	rwlock_init(&queue->syn_wait_lock);
 	queue->rskq_accept_head = NULL;
 	lopt->nr_table_entries = nr_table_entries;
+	lopt->max_qlen_log = ilog2(nr_table_entries);
 
 	write_lock_bh(&queue->syn_wait_lock);
 	queue->listen_opt = lopt;
@@ -72,22 +72,8 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 
 void __reqsk_queue_destroy(struct request_sock_queue *queue)
 {
-	struct listen_sock *lopt;
-	size_t lopt_size;
-
-	/*
-	 * this is an error recovery path only
-	 * no locking needed and the lopt is not NULL
-	 */
-
-	lopt = queue->listen_opt;
-	lopt_size = sizeof(struct listen_sock) +
-		lopt->nr_table_entries * sizeof(struct request_sock *);
-
-	if (lopt_size > PAGE_SIZE)
-		vfree(lopt);
-	else
-		kfree(lopt);
+	/* This is an error recovery path only, no locking needed */
+	kvfree(queue->listen_opt);
 }
 
 static inline struct listen_sock *reqsk_queue_yank_listen_sk(
@@ -107,8 +93,6 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 {
 	/* make all the listen_opt local to us */
 	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
-	size_t lopt_size = sizeof(struct listen_sock) +
-		lopt->nr_table_entries * sizeof(struct request_sock *);
 
 	if (lopt->qlen != 0) {
 		unsigned int i;
@@ -125,10 +109,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 	}
 
 	WARN_ON(lopt->qlen != 0);
-	if (lopt_size > PAGE_SIZE)
-		vfree(lopt);
-	else
-		kfree(lopt);
+	kvfree(lopt);
 }
 
 /*