@@ -30,21 +30,6 @@ unsigned int sysctl_nr_open_min = BITS_PER_LONG;
 unsigned int sysctl_nr_open_max =
         __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
 
-static void *alloc_fdmem(size_t size)
-{
-        /*
-         * Very large allocations can stress page reclaim, so fall back to
-         * vmalloc() if the allocation size will be considered "large" by the VM.
-         */
-        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
-                void *data = kmalloc(size, GFP_KERNEL_ACCOUNT |
-                                     __GFP_NOWARN | __GFP_NORETRY);
-                if (data != NULL)
-                        return data;
-        }
-        return __vmalloc(size, GFP_KERNEL_ACCOUNT, PAGE_KERNEL);
-}
-
 static void __free_fdtable(struct fdtable *fdt)
 {
         kvfree(fdt->fd);
@@ -131,13 +116,14 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
         if (!fdt)
                 goto out;
         fdt->max_fds = nr;
-        data = alloc_fdmem(nr * sizeof(struct file *));
+        data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
         if (!data)
                 goto out_fdt;
         fdt->fd = data;
 
-        data = alloc_fdmem(max_t(size_t,
-                        2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES));
+        data = kvmalloc(max_t(size_t,
+                        2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
+                        GFP_KERNEL_ACCOUNT);
         if (!data)
                 goto out_arr;
         fdt->open_fds = data;
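
Not part of the patch, illustration only: a minimal sketch of how the kvmalloc_array()/kvfree() pair takes over from the removed open-coded helper, assuming the usual kvmalloc() semantics (overflow-checked multiplication, kmalloc() attempted first, vmalloc() fallback for larger requests, kvfree() releasing either kind of allocation). The helper names alloc_fd_array()/free_fd_array() below are hypothetical and exist only for this sketch.

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    /*
     * Hypothetical helper: allocate an array of nr file pointers the same
     * way alloc_fdtable() now does.  kvmalloc_array() checks the
     * nr * sizeof(struct file *) multiplication for overflow, tries
     * kmalloc() first and falls back to vmalloc() for larger requests;
     * GFP_KERNEL_ACCOUNT charges the allocation to the memory cgroup.
     */
    static struct file **alloc_fd_array(unsigned int nr)
    {
            return kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
    }

    /* kvfree() handles both kmalloc()ed and vmalloc()ed memory. */
    static void free_fd_array(struct file **fds)
    {
            kvfree(fds);
    }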