@@ -2034,25 +2034,6 @@ static inline int dl_bw_cpus(int i)
 }
 #endif
 
-static inline
-void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
-{
-	dl_b->total_bw -= tsk_bw;
-}
-
-static inline
-void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
-{
-	dl_b->total_bw += tsk_bw;
-}
-
-static inline
-bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
-{
-	return dl_b->bw != -1 &&
-	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
-}
-
 /*
  * We must be sure that accepting a new task (or allowing changing the
  * parameters of an existing one) is consistent with the bandwidth
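
The three helpers removed above are the SCHED_DEADLINE bandwidth-accounting primitives: __dl_add() and __dl_clear() charge or refund one task's utilization against a root domain's total, and __dl_overflow() is the admission test, refusing a change whenever total_bw - old_bw + new_bw would exceed bw * cpus (bw == -1 meaning "no limit"). They are not dropped: the new task_can_attach() added below still calls __dl_overflow() and __dl_add(), so elsewhere in this series they presumably move into a scheduler header shared with the deadline class (that part of the series is not shown here).

As a quick illustration of the test itself, here is a self-contained userspace sketch. dl_bw_sketch, to_ratio_sketch() and the 95%-of-2-CPUs / 30ms-per-100ms numbers are made-up stand-ins, not taken from this patch; the <<20 fixed point mirrors the kernel's to_ratio().

/*
 * Userspace sketch of the admission test __dl_overflow() performs.
 * dl_bw_sketch stands in for struct dl_bw; all sample numbers are
 * illustrative assumptions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dl_bw_sketch {
	int64_t bw;		/* per-CPU bandwidth cap, -1 == no limit */
	uint64_t total_bw;	/* sum of admitted task bandwidths */
};

/* runtime/period ratio in <<20 fixed point, like the kernel's to_ratio() */
static uint64_t to_ratio_sketch(uint64_t period, uint64_t runtime)
{
	return (runtime << 20) / period;
}

/* Same condition as __dl_overflow(): would admitting new_bw (while
 * dropping old_bw) push total demand past bw * cpus? */
static bool dl_overflow_sketch(const struct dl_bw_sketch *b, int cpus,
			       uint64_t old_bw, uint64_t new_bw)
{
	return b->bw != -1 &&
	       (uint64_t)b->bw * cpus < b->total_bw - old_bw + new_bw;
}

int main(void)
{
	/* 2-CPU root domain, 95% of each CPU available to deadline tasks */
	struct dl_bw_sketch b = {
		.bw = (int64_t)to_ratio_sketch(1000000, 950000),
		.total_bw = 0,
	};
	/* hypothetical task: 30ms runtime every 100ms => 0.3 CPUs */
	uint64_t task_bw = to_ratio_sketch(100000000ULL, 30000000ULL);

	for (int i = 1; i <= 8; i++) {
		if (dl_overflow_sketch(&b, 2, 0, task_bw)) {
			printf("task %d rejected: would exceed 1.9 CPUs\n", i);
		} else {
			b.total_bw += task_bw;	/* what __dl_add() does */
			printf("task %d admitted, total_bw = %llu (<<20)\n",
			       i, (unsigned long long)b.total_bw);
		}
	}
	return 0;
}

With these numbers the first six tasks fit under the 1.9-CPU budget and the seventh is refused, which is exactly the -EBUSY case in the second hunk below.
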
@@ -4669,6 +4650,57 @@ void init_idle(struct task_struct *idle, int cpu)
 #endif
 }
 
+int task_can_attach(struct task_struct *p,
+		    const struct cpumask *cs_cpus_allowed)
+{
+	int ret = 0;
+
+	/*
+	 * Kthreads which disallow setaffinity shouldn't be moved
+	 * to a new cpuset; we don't want to change their cpu
+	 * affinity and isolating such threads by their set of
+	 * allowed nodes is unnecessary. Thus, cpusets are not
+	 * applicable for such threads. This prevents checking for
+	 * success of set_cpus_allowed_ptr() on all attached tasks
+	 * before cpus_allowed may be changed.
+	 */
+	if (p->flags & PF_NO_SETAFFINITY) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+#ifdef CONFIG_SMP
+	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
+					      cs_cpus_allowed)) {
+		unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
+							cs_cpus_allowed);
+		struct dl_bw *dl_b = dl_bw_of(dest_cpu);
+		bool overflow;
+		int cpus;
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
+		cpus = dl_bw_cpus(dest_cpu);
+		overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
+		if (overflow)
+			ret = -EBUSY;
+		else {
+			/*
+			 * We reserve space for this task in the destination
+			 * root_domain, as we can't fail after this point.
+			 * We will free resources in the source root_domain
+			 * later on (see set_cpus_allowed_dl()).
+			 */
+			__dl_add(dl_b, p->dl.dl_bw);
+		}
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+	}
+#endif
+out:
+	return ret;
+}
+
 #ifdef CONFIG_SMP
 /*
  * move_queued_task - move a queued task to new rq.
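
The comment inside the new function states the key ordering decision: the bandwidth check and the reservation on the destination root domain happen together, under dl_b->lock, at the last point where attaching can still be refused (-EBUSY); the source root domain is only debited later, in set_cpus_allowed_dl(), once the migration can no longer fail. The toy model below sketches that reserve-then-release protocol in userspace; rd_bw_sketch, try_reserve_bw(), release_bw() and the pthread locking are illustrative assumptions, not kernel code.

/*
 * Toy model of the two-phase bandwidth hand-over task_can_attach() sets
 * up: reserve on the destination while failure is still possible, and
 * release the source only after the migration can no longer fail.
 */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct rd_bw_sketch {
	pthread_mutex_t lock;
	uint64_t cap;		/* bw * cpus for this root domain */
	uint64_t total_bw;	/* currently admitted bandwidth */
};

/* Phase 1: runs at "can_attach" time, may still refuse with -EBUSY. */
static int try_reserve_bw(struct rd_bw_sketch *dst, uint64_t task_bw)
{
	int ret = 0;

	pthread_mutex_lock(&dst->lock);
	if (dst->total_bw + task_bw > dst->cap)
		ret = -EBUSY;			/* like __dl_overflow() failing */
	else
		dst->total_bw += task_bw;	/* like __dl_add() */
	pthread_mutex_unlock(&dst->lock);
	return ret;
}

/* Phase 2: runs once the move is committed (cf. set_cpus_allowed_dl()). */
static void release_bw(struct rd_bw_sketch *src, uint64_t task_bw)
{
	pthread_mutex_lock(&src->lock);
	src->total_bw -= task_bw;		/* like __dl_clear() */
	pthread_mutex_unlock(&src->lock);
}

static int migrate_task_bw(struct rd_bw_sketch *src,
			   struct rd_bw_sketch *dst, uint64_t task_bw)
{
	int ret = try_reserve_bw(dst, task_bw);

	if (ret)
		return ret;	/* nothing was touched, attach is refused */
	/* ... the actual migration happens here and cannot fail ... */
	release_bw(src, task_bw);
	return 0;
}

int main(void)
{
	struct rd_bw_sketch src = { .lock = PTHREAD_MUTEX_INITIALIZER,
				    .cap = 1000, .total_bw = 300 };
	struct rd_bw_sketch dst = { .lock = PTHREAD_MUTEX_INITIALIZER,
				    .cap = 200, .total_bw = 0 };
	int ret = migrate_task_bw(&src, &dst, 300);

	/* 300 units do not fit under dst's cap of 200: refused up front. */
	printf("migrate 300 units: %s\n",
	       ret == -EBUSY ? "refused (-EBUSY)" : "moved");
	return 0;
}

Reserving first means the only failure mode is the early -EBUSY; once the task is actually moved, both domains are already consistent except for the pending release, which cannot fail.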