|
@@ -65,6 +65,11 @@ struct spu_queue {
|
|
|
struct list_head list;
|
|
|
};
|
|
|
|
|
|
+struct spu_qreg {
|
|
|
+ struct spu_queue *queue;
|
|
|
+ unsigned long type;
|
|
|
+};
|
|
|
+
|
|
|
static struct spu_queue **cpu_to_cwq;
|
|
|
static struct spu_queue **cpu_to_mau;
|
|
|
|
|
@@ -1631,31 +1636,27 @@ static void queue_cache_destroy(void)
|
|
|
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
|
|
|
}
|
|
|
|
|
|
-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
|
|
|
+static long spu_queue_register_workfn(void *arg)
|
|
|
{
|
|
|
- cpumask_var_t old_allowed;
|
|
|
+ struct spu_qreg *qr = arg;
|
|
|
+ struct spu_queue *p = qr->queue;
|
|
|
+ unsigned long q_type = qr->type;
|
|
|
unsigned long hv_ret;
|
|
|
|
|
|
- if (cpumask_empty(&p->sharing))
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
|
|
|
- return -ENOMEM;
|
|
|
-
|
|
|
- cpumask_copy(old_allowed, &current->cpus_allowed);
|
|
|
-
|
|
|
- set_cpus_allowed_ptr(current, &p->sharing);
|
|
|
-
|
|
|
hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
|
|
|
CWQ_NUM_ENTRIES, &p->qhandle);
|
|
|
if (!hv_ret)
|
|
|
sun4v_ncs_sethead_marker(p->qhandle, 0);
|
|
|
|
|
|
- set_cpus_allowed_ptr(current, old_allowed);
|
|
|
+ return hv_ret ? -EINVAL : 0;
|
|
|
+}
|
|
|
|
|
|
- free_cpumask_var(old_allowed);
|
|
|
+static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
|
|
|
+{
|
|
|
+ int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
|
|
|
+ struct spu_qreg qr = { .queue = p, .type = q_type };
|
|
|
|
|
|
- return (hv_ret ? -EINVAL : 0);
|
|
|
+ return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
|
|
|
}
|
|
|
|
|
|
static int spu_queue_setup(struct spu_queue *p)
|