@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 {
 	struct kioctx_cpu *kcpu;
+	unsigned long flags;
 
 	preempt_disable();
 	kcpu = this_cpu_ptr(ctx->cpu);
 
+	local_irq_save(flags);
 	kcpu->reqs_available += nr;
+
 	while (kcpu->reqs_available >= ctx->req_batch * 2) {
 		kcpu->reqs_available -= ctx->req_batch;
 		atomic_add(ctx->req_batch, &ctx->reqs_available);
 	}
 
+	local_irq_restore(flags);
 	preempt_enable();
 }
 
@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
 {
 	struct kioctx_cpu *kcpu;
 	bool ret = false;
+	unsigned long flags;
 
 	preempt_disable();
 	kcpu = this_cpu_ptr(ctx->cpu);
 
+	local_irq_save(flags);
 	if (!kcpu->reqs_available) {
 		int old, avail = atomic_read(&ctx->reqs_available);
 
@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
 	ret = true;
 	kcpu->reqs_available--;
 out:
+	local_irq_restore(flags);
 	preempt_enable();
 	return ret;
 }
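
For readability, here is put_reqs_available() reconstructed with the hunks
above applied (the comments are annotations added here, not part of the
patch). The point of the change: preempt_disable() alone only pins the task
to one CPU so that this_cpu_ptr() stays valid, while the new
local_irq_save()/local_irq_restore() bracket also excludes interrupt-context
callers on that CPU -- the plausible hazard being an aio completion running
in irq context and interleaving with the non-atomic read-modify-write on
kcpu->reqs_available. get_reqs_available() gains the same bracket around its
use of the per-cpu counter.

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	preempt_disable();		/* pin to this CPU; this_cpu_ptr() stays valid */
	kcpu = this_cpu_ptr(ctx->cpu);

	local_irq_save(flags);		/* exclude irq-context callers on this CPU */
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		/* return a full batch of request slots to the shared pool */
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
	preempt_enable();
}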