@@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
  */
 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 {
-	unsigned long free, allowed, reserve;
+	long free, allowed, reserve;
 
 	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
 			-(s64)vm_committed_as_batch * num_online_cpus(),
@@ -220,7 +220,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 	 */
 	if (mm) {
 		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
-		allowed -= min(mm->total_vm / 32, reserve);
+		allowed -= min_t(long, mm->total_vm / 32, reserve);
 	}
 
 	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
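
For context (not part of the patch itself): a minimal userspace sketch of the signedness hazard this change sidesteps, assuming the motivation is that the value subtracted from "allowed" can exceed it. With unsigned long the subtraction wraps to a huge value, so the later comparison against vm_committed_as can pass when it should not; with long it simply goes negative. The numbers below are hypothetical and chosen only to show the wraparound.

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical values, picked only to illustrate the wraparound. */
		unsigned long allowed_u = 100;	/* old: unsigned long allowed */
		long allowed_s = 100;		/* new: long allowed */
		unsigned long charge = 150;	/* e.g. min(total_vm / 32, reserve) */

		allowed_u -= charge;		/* wraps to ~ULONG_MAX - 49 */
		allowed_s -= (long)charge;	/* goes negative, as the check expects */

		printf("unsigned: %lu\n", allowed_u);
		printf("signed:   %ld\n", allowed_s);
		return 0;
	}

The min() -> min_t(long, ...) change in the second hunk follows from the same type change: once reserve is a long, comparing it against the unsigned mm->total_vm / 32 would mix signedness in the kernel's type-checked min(), so the comparison is forced to long explicitly.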