@@ -48,7 +48,7 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
 void *
 kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
 {
-	unsigned noio_flag = 0;
+	unsigned nofs_flag = 0;
 	void *ptr;
 	gfp_t lflags;
 
@@ -60,17 +60,17 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
 	 * __vmalloc() will allocate data pages and auxillary structures (e.g.
 	 * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context
 	 * here. Hence we need to tell memory reclaim that we are in such a
-	 * context via PF_MEMALLOC_NOIO to prevent memory reclaim re-entering
+	 * context via PF_MEMALLOC_NOFS to prevent memory reclaim re-entering
 	 * the filesystem here and potentially deadlocking.
 	 */
-	if ((current->flags & PF_MEMALLOC_NOFS) || (flags & KM_NOFS))
-		noio_flag = memalloc_noio_save();
+	if (flags & KM_NOFS)
+		nofs_flag = memalloc_nofs_save();
 
 	lflags = kmem_flags_convert(flags);
 	ptr = __vmalloc(size, lflags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
 
-	if ((current->flags & PF_MEMALLOC_NOFS) || (flags & KM_NOFS))
-		memalloc_noio_restore(noio_flag);
+	if (flags & KM_NOFS)
+		memalloc_nofs_restore(nofs_flag);
 
 	return ptr;
 }
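For readers unfamiliar with the scoped-NOFS API the hunk switches to, the sketch below shows the save/restore pattern in isolation. It is only an illustration under stated assumptions: xfs_example_alloc() and its transaction_context parameter are hypothetical; memalloc_nofs_save()/memalloc_nofs_restore() (from <linux/sched/mm.h>) and kmalloc() are the real kernel interfaces being demonstrated.

/*
 * Hypothetical helper illustrating the scoped PF_MEMALLOC_NOFS pattern.
 * Not part of the patch above; it only shows how the save/restore pair
 * brackets an allocation that must not recurse into the filesystem.
 */
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *xfs_example_alloc(size_t size, bool transaction_context)
{
	unsigned int nofs_flag = 0;
	void *ptr;

	/*
	 * Entering the scope sets PF_MEMALLOC_NOFS on the current task, so
	 * any reclaim triggered by allocations inside the scope behaves as
	 * GFP_NOFS and cannot re-enter the filesystem and deadlock.
	 */
	if (transaction_context)
		nofs_flag = memalloc_nofs_save();

	ptr = kmalloc(size, GFP_KERNEL);

	/* Restore the previous task flag state once the scope ends. */
	if (transaction_context)
		memalloc_nofs_restore(nofs_flag);

	return ptr;
}

The point of the conversion in the patch is the same: rather than abusing the NOIO scope, the NOFS scope is entered only when KM_NOFS is requested, and the saved flag value is restored afterwards so nested scopes compose correctly.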