scheduler.c

/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"

#define RING_CTX_OFF(x) \
        offsetof(struct execlist_ring_context, x)

static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
{
        struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
        int i;

        for (i = 0; i < 8; i++)
                pdp_pair[i].val = pdp[7 - i];
}
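
/*
 * populate_shadow_context - copy the guest ring context into the shadow
 * context.
 *
 * Reads the guest's per-ring context pages from guest memory through the
 * hypervisor, copies them into the shadow context object, refreshes a few
 * registers with COPY_REG(), and installs the shadow PPGTT root pointers so
 * the workload executes against the shadowed page tables.
 */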
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = intel_lr_context_size(
                        gvt->dev_priv->engine[ring_id]);

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_err("invalid guest context descriptor\n");
                        return -EINVAL;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
                dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
                + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

        if (ring_id == RCS) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        }
#undef COPY_REG

        set_context_pdp_root_pointer(shadow_ring_context,
                        workload->shadow_mm->shadow_page_table);

        intel_gvt_hypervisor_read_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
        return 0;
}
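
/*
 * Notifier callback invoked when the shadow context is scheduled in or out
 * on the hardware. Switches the render MMIO state between host and vGPU
 * values, updates the shadow_ctx_active flag accordingly, and wakes anyone
 * waiting on shadow_ctx_status_wq (see complete_current_workload()).
 */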
static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct intel_vgpu *vgpu = container_of(nb,
                        struct intel_vgpu, shadow_ctx_notifier_block);
        struct drm_i915_gem_request *req =
                        (struct drm_i915_gem_request *)data;
        struct intel_gvt_workload_scheduler *scheduler =
                        &vgpu->gvt->scheduler;
        struct intel_vgpu_workload *workload =
                scheduler->current_workload[req->engine->id];

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
                intel_gvt_load_render_mmio(workload->vgpu,
                                workload->ring_id);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
                intel_gvt_restore_render_mmio(workload->vgpu,
                                workload->ring_id);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
        }

        wake_up(&workload->shadow_ctx_status_wq);
        return NOTIFY_OK;
}
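
/*
 * Allocate a request on the target engine, scan and shadow the guest ring
 * buffer and wa_ctx, populate the shadow context, then submit the request
 * to i915. Any error is recorded in workload->status. Once the request has
 * been allocated it is always added, so the context state stays consistent.
 * Called with gvt->lock held; struct_mutex is taken and released here.
 */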
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct drm_i915_gem_request *rq;
        int ret;

        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);

        shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
                                    GEN8_CTX_ADDRESSING_MODE_SHIFT;

        mutex_lock(&dev_priv->drm.struct_mutex);

        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_err("failed to allocate gem request\n");
                workload->status = PTR_ERR(rq);
                mutex_unlock(&dev_priv->drm.struct_mutex);
                return workload->status;
        }

        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

        workload->req = i915_gem_request_get(rq);

        ret = intel_gvt_scan_and_shadow_workload(workload);
        if (ret)
                goto out;

        ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
        if (ret)
                goto out;

        ret = populate_shadow_context(workload);
        if (ret)
                goto out;

        if (workload->prepare) {
                ret = workload->prepare(workload);
                if (ret)
                        goto out;
        }

        gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                ring_id, workload->req);

        ret = 0;
        workload->dispatched = true;
out:
        if (ret)
                workload->status = ret;

        i915_add_request_no_flush(rq);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}
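
/*
 * Pick the next workload for a ring under gvt->lock. Bails out when there
 * is no current vgpu, a reschedule is pending, or the workload queue is
 * empty; resubmits a still-current workload whose dispatch failed earlier.
 * Bumps the vgpu's running workload count for the workload it picks.
 */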
static struct intel_vgpu_workload *pick_next_workload(
                struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;

        mutex_lock(&gvt->lock);

        /*
         * no current vgpu / will be scheduled out / no workload
         * bail out
         */
        if (!scheduler->current_vgpu) {
                gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
                goto out;
        }

        if (scheduler->need_reschedule) {
                gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
                goto out;
        }

        if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
                gvt_dbg_sched("ring id %d stop - no available workload\n",
                                ring_id);
                goto out;
        }

        /*
         * still have current workload, maybe the workload dispatcher
         * failed to submit it for some reason, resubmit it.
         */
        if (scheduler->current_workload[ring_id]) {
                workload = scheduler->current_workload[ring_id];
                gvt_dbg_sched("ring id %d still have current workload %p\n",
                                ring_id, workload);
                goto out;
        }

        /*
         * pick a workload as current workload
         * once current workload is set, schedule policy routines
         * will wait until the current workload is finished when trying to
         * schedule out a vgpu.
         */
        scheduler->current_workload[ring_id] = container_of(
                        workload_q_head(scheduler->current_vgpu, ring_id)->next,
                        struct intel_vgpu_workload, list);

        workload = scheduler->current_workload[ring_id];

        gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);

        atomic_inc(&workload->vgpu->running_workload_num);
out:
        mutex_unlock(&gvt->lock);
        return workload;
}
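
/*
 * update_guest_context - write shadow context state back to the guest.
 *
 * Mirror of populate_shadow_context(): copies the shadow context pages back
 * to guest memory through the hypervisor after the workload completes, and
 * updates the guest's ring header and the COPY_REG() registers.
 */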
static void update_guest_context(struct intel_vgpu_workload *workload)
{
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;

        gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
                        workload->ctx_desc.lrca);

        context_page_num = intel_lr_context_size(
                        gvt->dev_priv->engine[ring_id]);

        context_page_num = context_page_num >> PAGE_SHIFT;

        if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
                context_page_num = 19;

        i = 2;

        while (i < context_page_num) {
                context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                                (u32)((workload->ctx_desc.lrca + i) <<
                                GTT_PAGE_SHIFT));
                if (context_gpa == INTEL_GVT_INVALID_ADDR) {
                        gvt_err("invalid guest context descriptor\n");
                        return;
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
                src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                GTT_PAGE_SIZE);
                kunmap(page);
                i++;
        }

        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
                RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
                RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)

        COPY_REG(ctx_ctrl);
        COPY_REG(ctx_timestamp);

#undef COPY_REG

        intel_gvt_hypervisor_write_gpa(vgpu,
                        workload->ring_context_gpa +
                        sizeof(*shadow_ring_context),
                        (void *)shadow_ring_context +
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

        kunmap(page);
}
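
/*
 * Finish the current workload on a ring: unless the workload failed or the
 * vgpu is resetting, wait for the shadow context to be scheduled out, write
 * the results back to the guest, and fire any pending virtual events. Then
 * clear the current-workload slot and wake up waiters.
 */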
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload;
        int event;

        mutex_lock(&gvt->lock);

        workload = scheduler->current_workload[ring_id];

        if (!workload->status && !workload->vgpu->resetting) {
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));

                update_guest_context(workload);

                for_each_set_bit(event, workload->pending_events,
                                 INTEL_GVT_EVENT_MAX)
                        intel_vgpu_trigger_virtual_event(workload->vgpu,
                                        event);
        }

        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
                        ring_id, workload, workload->status);

        scheduler->current_workload[ring_id] = NULL;

        atomic_dec(&workload->vgpu->running_workload_num);

        list_del_init(&workload->list);
        workload->complete(workload);

        wake_up(&scheduler->workload_complete_wq);
        mutex_unlock(&gvt->lock);
}

struct workload_thread_param {
        struct intel_gvt *gvt;
        int ring_id;
};

static DEFINE_MUTEX(scheduler_mutex);
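
/*
 * Per-engine kernel thread: sleeps until a workload is queued, dispatches it
 * (with runtime PM held, plus forcewake on Skylake), waits for the request
 * to complete, then retires the workload. The global scheduler_mutex
 * serializes the dispatch/complete cycle across engines.
 */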
static int workload_thread(void *priv)
{
        struct workload_thread_param *p = (struct workload_thread_param *)priv;
        struct intel_gvt *gvt = p->gvt;
        int ring_id = p->ring_id;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        long lret;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        kfree(p);

        gvt_dbg_core("workload thread for ring %d started\n", ring_id);

        while (!kthread_should_stop()) {
                add_wait_queue(&scheduler->waitq[ring_id], &wait);
                do {
                        workload = pick_next_workload(gvt, ring_id);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
                remove_wait_queue(&scheduler->waitq[ring_id], &wait);

                if (!workload)
                        break;

                mutex_lock(&scheduler_mutex);

                gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
                                workload->ring_id, workload,
                                workload->vgpu->id);

                intel_runtime_pm_get(gvt->dev_priv);

                gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                                workload->ring_id, workload);

                if (need_force_wake)
                        intel_uncore_forcewake_get(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                mutex_lock(&gvt->lock);
                ret = dispatch_workload(workload);
                mutex_unlock(&gvt->lock);

                if (ret) {
                        gvt_err("failed to dispatch workload, skipping\n");
                        goto complete;
                }

                gvt_dbg_sched("ring id %d wait workload %p\n",
                                workload->ring_id, workload);

                lret = i915_wait_request(workload->req,
                                0, MAX_SCHEDULE_TIMEOUT);
                if (lret < 0) {
                        workload->status = lret;
                        gvt_err("failed to wait for workload, skipping\n");
                } else {
                        workload->status = 0;
                }

complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
                                workload, workload->status);

                complete_current_workload(gvt, ring_id);

                i915_gem_request_put(fetch_and_zero(&workload->req));

                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);

                intel_runtime_pm_put(gvt->dev_priv);

                mutex_unlock(&scheduler_mutex);
        }
        return 0;
}
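
/*
 * Block until the vGPU's running workload count drops to zero; woken from
 * complete_current_workload() via workload_complete_wq.
 */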
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

        if (atomic_read(&vgpu->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");

                wait_event(scheduler->workload_complete_wq,
                                !atomic_read(&vgpu->running_workload_num));
        }
}
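
/* Stop and forget every per-engine workload thread. */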
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        int i;

        gvt_dbg_core("clean workload scheduler\n");

        for (i = 0; i < I915_NUM_ENGINES; i++) {
                if (scheduler->thread[i]) {
                        kthread_stop(scheduler->thread[i]);
                        scheduler->thread[i] = NULL;
                }
        }
}
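
/*
 * Create one workload thread per present engine. Each thread takes ownership
 * of (and frees) its heap-allocated workload_thread_param; on failure, all
 * threads created so far are stopped and the last param is freed here.
 */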
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct workload_thread_param *param = NULL;
        int ret;
        int i;

        gvt_dbg_core("init workload scheduler\n");

        init_waitqueue_head(&scheduler->workload_complete_wq);

        for (i = 0; i < I915_NUM_ENGINES; i++) {
                /* check ring mask at init time */
                if (!HAS_ENGINE(gvt->dev_priv, i))
                        continue;

                init_waitqueue_head(&scheduler->waitq[i]);

                param = kzalloc(sizeof(*param), GFP_KERNEL);
                if (!param) {
                        ret = -ENOMEM;
                        goto err;
                }

                param->gvt = gvt;
                param->ring_id = i;

                scheduler->thread[i] = kthread_run(workload_thread, param,
                        "gvt workload %d", i);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("failed to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                        goto err;
                }
        }
        return 0;
err:
        intel_gvt_clean_workload_scheduler(gvt);
        kfree(param);
        param = NULL;
        return ret;
}
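
/*
 * Unregister the shadow context status notifier and drop the vGPU's shadow
 * context under struct_mutex.
 */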
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

        atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
                        &vgpu->shadow_ctx_notifier_block);

        mutex_lock(&dev_priv->drm.struct_mutex);

        /* a little hacky: mark the ctx as closed */
        vgpu->shadow_ctx->closed = true;

        i915_gem_context_put(vgpu->shadow_ctx);
        mutex_unlock(&dev_priv->drm.struct_mutex);
}
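
/*
 * Create the per-vGPU shadow GEM context and register for its
 * schedule-in/out notifications via shadow_context_status_change().
 */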
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
{
        atomic_set(&vgpu->running_workload_num, 0);

        vgpu->shadow_ctx = i915_gem_context_create_gvt(
                        &vgpu->gvt->dev_priv->drm);
        if (IS_ERR(vgpu->shadow_ctx))
                return PTR_ERR(vgpu->shadow_ctx);

        vgpu->shadow_ctx->engine[RCS].initialised = true;

        vgpu->shadow_ctx_notifier_block.notifier_call =
                shadow_context_status_change;

        atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
                                       &vgpu->shadow_ctx_notifier_block);
        return 0;
}