i915_cmd_parser.c

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Brad Volkin <bradley.d.volkin@intel.com>
 *
 */

#include "i915_drv.h"

/**
 * DOC: batch buffer command parser
 *
 * Motivation:
 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
 * require userspace code to submit batches containing commands such as
 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
 * generations of the hardware will noop these commands in "unsecure" batches
 * (which includes all userspace batches submitted via i915) even though the
 * commands may be safe and represent the intended programming model of the
 * device.
 *
 * The software command parser is similar in operation to the command parsing
 * done in hardware for unsecure batches. However, the software parser allows
 * some operations that would be noop'd by hardware, if the parser determines
 * the operation is safe, and submits the batch as "secure" to prevent hardware
 * parsing.
 *
 * Threats:
 * At a high level, the hardware (and software) checks attempt to prevent
 * granting userspace undue privileges. There are three categories of privilege.
 *
 * First, commands which are explicitly defined as privileged or which should
 * only be used by the kernel driver. The parser generally rejects such
 * commands, though it may allow some from the drm master process.
 *
 * Second, commands which access registers. To support correct/enhanced
 * userspace functionality, particularly certain OpenGL extensions, the parser
 * provides a whitelist of registers which userspace may safely access (for both
 * normal and drm master processes).
 *
 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
 * The parser always rejects such commands.
 *
 * The majority of the problematic commands fall in the MI_* range, with only a
 * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
 *
 * Implementation:
 * Each ring maintains tables of commands and registers which the parser uses in
 * scanning batch buffers submitted to that ring.
 *
 * Since the set of commands that the parser must check for is significantly
 * smaller than the number of commands supported, the parser tables contain only
 * those commands required by the parser. This generally works because command
 * opcode ranges have standard command length encodings. So for commands that
 * the parser does not need to check, it can easily skip them. This is
 * implemented via a per-ring length decoding vfunc.
 *
 * Unfortunately, there are a number of commands that do not follow the standard
 * length encoding for their opcode range, primarily amongst the MI_* commands.
 * To handle this, the parser provides a way to define explicit "skip" entries
 * in the per-ring command tables.
 *
 * Other command table entries map fairly directly to the high level categories
 * mentioned above: rejected, master-only, register whitelist. The parser
 * implements a number of checks, including the privileged memory checks, via a
 * general bitmasking mechanism.
 */

#define STD_MI_OPCODE_MASK  0xFF800000
#define STD_3D_OPCODE_MASK  0xFFFF0000
#define STD_2D_OPCODE_MASK  0xFFC00000
#define STD_MFX_OPCODE_MASK 0xFFFF0000

#define CMD(op, opm, f, lm, fl, ...)                            \
        {                                                       \
                .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),     \
                .cmd = { (op), (opm) },                         \
                .length = { (lm) },                             \
                __VA_ARGS__                                     \
        }

/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_MASK
#define S3D STD_3D_OPCODE_MASK
#define S2D STD_2D_OPCODE_MASK
#define SMFX STD_MFX_OPCODE_MASK
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
#define M CMD_DESC_MASTER
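
/*
 * Illustrative note (added for this write-up, not part of the driver): with
 * the shorthand above, a table entry such as
 *
 *   CMD( MI_NOOP, SMI, F, 1, S )
 *
 * expands to roughly
 *
 *   {
 *           .flags = CMD_DESC_SKIP | CMD_DESC_FIXED,
 *           .cmd = { MI_NOOP, STD_MI_OPCODE_MASK },
 *           .length = { 1 },
 *   }
 *
 * i.e. a fixed-length, one-dword command that the parser may simply skip.
 */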
/*            Command                          Mask   Fixed Len   Action
              ---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor common_cmds[] = {
        CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
        CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
        CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
        CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
        CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
        CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
        CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
        CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
        CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
              .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
        CMD(  MI_STORE_REGISTER_MEM,            SMI,    F,  3,     W | B,
              .reg = { .offset = 1, .mask = 0x007FFFFC },
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_LOAD_REGISTER_MEM,             SMI,    F,  3,     W | B,
              .reg = { .offset = 1, .mask = 0x007FFFFC },
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        /*
         * MI_BATCH_BUFFER_START requires some special handling. It's not
         * really a 'skip' action but it doesn't seem like it's worth adding
         * a new action. See i915_parse_cmds().
         */
        CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
};

static const struct drm_i915_cmd_descriptor render_cmds[] = {
        CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
        CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
        CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
        CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0xFF,   R  ),
        CMD(  MI_CLFLUSH,                       SMI,   !F,  0x3FF,  B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_REPORT_PERF_COUNT,             SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 1,
                        .mask = MI_REPORT_PERF_COUNT_GGTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  GFX_OP_3DSTATE_VF_STATISTICS,     S3D,    F,  1,      S  ),
        CMD(  PIPELINE_SELECT,                  S3D,    F,  1,      S  ),
        CMD(  MEDIA_VFE_STATE,                  S3D,   !F,  0xFFFF, B,
              .bits = {{
                        .offset = 2,
                        .mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
                        .expected = 0,
              }},                                                      ),
        CMD(  GPGPU_OBJECT,                     S3D,   !F,  0xFF,   S  ),
        CMD(  GPGPU_WALKER,                     S3D,   !F,  0xFF,   S  ),
        CMD(  GFX_OP_3DSTATE_SO_DECL_LIST,      S3D,   !F,  0x1FF,  S  ),
        CMD(  GFX_OP_PIPE_CONTROL(5),           S3D,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 1,
                        .mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
                                 PIPE_CONTROL_STORE_DATA_INDEX),
                        .expected = 0,
                        .condition_offset = 1,
                        .condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
              }},                                                      ),
};
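
/*
 * Illustrative reading of the PIPE_CONTROL entry above (added for this
 * write-up, not part of the driver): the first bitmask check requires the
 * MMIO-write and notify bits in dword 1 to be clear; the second check is
 * conditional and only applies when the post-sync-op field in dword 1 is
 * non-zero, in which case the global-GTT and store-data-index bits must also
 * be clear. See the condition_mask handling in check_cmd() below.
 */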
static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
        CMD(  MI_SET_PREDICATE,                 SMI,    F,  1,      S  ),
        CMD(  MI_RS_CONTROL,                    SMI,    F,  1,      S  ),
        CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
        CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
        CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
        CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   R  ),
        CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
        CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
        CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
        CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_VS,  S3D,   !F,  0x7FF,  S  ),
        CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_PS,  S3D,   !F,  0x7FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS,  S3D,  !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS,  S3D,  !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS,  S3D,  !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS,  S3D,  !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,  !F,  0x1FF,  S  ),
};

static const struct drm_i915_cmd_descriptor video_cmds[] = {
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
        CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_FLUSH_DW_NOTIFY,
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = MI_FLUSH_DW_USE_GTT,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              },
              {
                        .offset = 0,
                        .mask = MI_FLUSH_DW_STORE_INDEX,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              }},                                                      ),
        CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        /*
         * MFX_WAIT doesn't fit the way we handle length for most commands.
         * It has a length field but it uses a non-standard length bias.
         * It is always 1 dword though, so just treat it as fixed length.
         */
        CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
};

static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
        CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_FLUSH_DW_NOTIFY,
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = MI_FLUSH_DW_USE_GTT,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              },
              {
                        .offset = 0,
                        .mask = MI_FLUSH_DW_STORE_INDEX,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              }},                                                      ),
        CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
};

static const struct drm_i915_cmd_descriptor blt_cmds[] = {
        CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
        CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_FLUSH_DW_NOTIFY,
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = MI_FLUSH_DW_USE_GTT,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              },
              {
                        .offset = 0,
                        .mask = MI_FLUSH_DW_STORE_INDEX,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              }},                                                      ),
        CMD(  COLOR_BLT,                        S2D,   !F,  0x3F,   S  ),
        CMD(  SRC_COPY_BLT,                     S2D,   !F,  0x3F,   S  ),
};

static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
        CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
        CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
};

#undef CMD
#undef SMI
#undef S3D
#undef S2D
#undef SMFX
#undef F
#undef S
#undef R
#undef W
#undef B
#undef M

static const struct drm_i915_cmd_table gen7_render_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { render_cmds, ARRAY_SIZE(render_cmds) },
};

static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { render_cmds, ARRAY_SIZE(render_cmds) },
        { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};

static const struct drm_i915_cmd_table gen7_video_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { video_cmds, ARRAY_SIZE(video_cmds) },
};

static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
};

static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { blt_cmds, ARRAY_SIZE(blt_cmds) },
};

static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { blt_cmds, ARRAY_SIZE(blt_cmds) },
        { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};

/*
 * Register whitelists, sorted by increasing register offset.
 */

/*
 * An individual whitelist entry granting access to register addr. If
 * mask is non-zero the argument of immediate register writes will be
 * AND-ed with mask, and the command will be rejected if the result
 * doesn't match value.
 *
 * Registers with non-zero mask are only allowed to be written using
 * LRI.
 */
struct drm_i915_reg_descriptor {
        i915_reg_t addr;
        u32 mask;
        u32 value;
};

/* Convenience macro for adding 32-bit registers. */
#define REG32(_reg, ...) \
        { .addr = (_reg), __VA_ARGS__ }

/*
 * Convenience macro for adding 64-bit registers.
 *
 * Some registers that userspace accesses are 64 bits. The register
 * access commands only allow 32-bit accesses. Hence, we have to include
 * entries for both halves of the 64-bit registers.
 */
#define REG64(_reg) \
        { .addr = _reg }, \
        { .addr = _reg ## _UDW }

#define REG64_IDX(_reg, idx) \
        { .addr = _reg(idx) }, \
        { .addr = _reg ## _UDW(idx) }
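
/*
 * Illustrative note (added for this write-up, not part of the driver):
 * REG64(PS_DEPTH_COUNT) expands to the two 32-bit whitelist entries
 *
 *   { .addr = PS_DEPTH_COUNT },
 *   { .addr = PS_DEPTH_COUNT_UDW },
 *
 * covering the low and high halves of the 64-bit counter.
 */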
static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
        REG64(GPGPU_THREADS_DISPATCHED),
        REG64(HS_INVOCATION_COUNT),
        REG64(DS_INVOCATION_COUNT),
        REG64(IA_VERTICES_COUNT),
        REG64(IA_PRIMITIVES_COUNT),
        REG64(VS_INVOCATION_COUNT),
        REG64(GS_INVOCATION_COUNT),
        REG64(GS_PRIMITIVES_COUNT),
        REG64(CL_INVOCATION_COUNT),
        REG64(CL_PRIMITIVES_COUNT),
        REG64(PS_INVOCATION_COUNT),
        REG64(PS_DEPTH_COUNT),
        REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
        REG64(MI_PREDICATE_SRC0),
        REG64(MI_PREDICATE_SRC1),
        REG32(GEN7_3DPRIM_END_OFFSET),
        REG32(GEN7_3DPRIM_START_VERTEX),
        REG32(GEN7_3DPRIM_VERTEX_COUNT),
        REG32(GEN7_3DPRIM_INSTANCE_COUNT),
        REG32(GEN7_3DPRIM_START_INSTANCE),
        REG32(GEN7_3DPRIM_BASE_VERTEX),
        REG32(GEN7_GPGPU_DISPATCHDIMX),
        REG32(GEN7_GPGPU_DISPATCHDIMY),
        REG32(GEN7_GPGPU_DISPATCHDIMZ),
        REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
        REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
        REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
        REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3),
        REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0),
        REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1),
        REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2),
        REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3),
        REG32(GEN7_SO_WRITE_OFFSET(0)),
        REG32(GEN7_SO_WRITE_OFFSET(1)),
        REG32(GEN7_SO_WRITE_OFFSET(2)),
        REG32(GEN7_SO_WRITE_OFFSET(3)),
        REG32(GEN7_L3SQCREG1),
        REG32(GEN7_L3CNTLREG2),
        REG32(GEN7_L3CNTLREG3),
        REG32(HSW_SCRATCH1,
              .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
              .value = 0),
        REG32(HSW_ROW_CHICKEN3,
              .mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
                        HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
              .value = 0),
};

static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
        REG32(BCS_SWCTRL),
};

static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
        REG32(FORCEWAKE_MT),
        REG32(DERRMR),
        REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
        REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
        REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
};

static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
        REG32(FORCEWAKE_MT),
        REG32(DERRMR),
};

#undef REG64
#undef REG32

static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
        u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
        u32 subclient =
                (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

        if (client == INSTR_MI_CLIENT)
                return 0x3F;
        else if (client == INSTR_RC_CLIENT) {
                if (subclient == INSTR_MEDIA_SUBCLIENT)
                        return 0xFFFF;
                else
                        return 0xFF;
        }

        DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
        return 0;
}

static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
        u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
        u32 subclient =
                (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
        u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;

        if (client == INSTR_MI_CLIENT)
                return 0x3F;
        else if (client == INSTR_RC_CLIENT) {
                if (subclient == INSTR_MEDIA_SUBCLIENT) {
                        if (op == 6)
                                return 0xFFFF;
                        else
                                return 0xFFF;
                } else
                        return 0xFF;
        }

        DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
        return 0;
}

static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
        u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;

        if (client == INSTR_MI_CLIENT)
                return 0x3F;
        else if (client == INSTR_BC_CLIENT)
                return 0xFF;

        DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
        return 0;
}

static bool validate_cmds_sorted(struct intel_engine_cs *ring,
                                 const struct drm_i915_cmd_table *cmd_tables,
                                 int cmd_table_count)
{
        int i;
        bool ret = true;

        if (!cmd_tables || cmd_table_count == 0)
                return true;

        for (i = 0; i < cmd_table_count; i++) {
                const struct drm_i915_cmd_table *table = &cmd_tables[i];
                u32 previous = 0;
                int j;

                for (j = 0; j < table->count; j++) {
                        const struct drm_i915_cmd_descriptor *desc =
                                &table->table[j];
                        u32 curr = desc->cmd.value & desc->cmd.mask;

                        if (curr < previous) {
                                DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
                                          ring->id, i, j, curr, previous);
                                ret = false;
                        }

                        previous = curr;
                }
        }

        return ret;
}

static bool check_sorted(int ring_id,
                         const struct drm_i915_reg_descriptor *reg_table,
                         int reg_count)
{
        int i;
        u32 previous = 0;
        bool ret = true;

        for (i = 0; i < reg_count; i++) {
                u32 curr = i915_mmio_reg_offset(reg_table[i].addr);

                if (curr < previous) {
                        DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
                                  ring_id, i, curr, previous);
                        ret = false;
                }

                previous = curr;
        }

        return ret;
}

static bool validate_regs_sorted(struct intel_engine_cs *ring)
{
        return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
                check_sorted(ring->id, ring->master_reg_table,
                             ring->master_reg_count);
}

struct cmd_node {
        const struct drm_i915_cmd_descriptor *desc;
        struct hlist_node node;
};

/*
 * Different command ranges have different numbers of bits for the opcode. For
 * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
 * problem is that, for example, MI commands use bits 22:16 for other fields
 * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
 * we mask a command from a batch it could hash to the wrong bucket due to
 * non-opcode bits being set. But if we don't include those bits, some 3D
 * commands may hash to the same bucket due to not including opcode bits that
 * make the command unique. For now, we will risk hashing to the same bucket.
 *
 * If we attempt to generate a perfect hash, we should be able to look at bits
 * 31:29 of a command from a batch buffer and use the full mask for that
 * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
 */
#define CMD_HASH_MASK STD_MI_OPCODE_MASK
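
/*
 * Illustrative note (added for this write-up, not part of the driver): with
 * CMD_HASH_MASK covering only bits 31:23, two hypothetical 3D opcodes that
 * differ only in bits 22:16 collide:
 *
 *   (0x78050000 & CMD_HASH_MASK) == (0x78060000 & CMD_HASH_MASK)
 *                                == 0x78000000
 *
 * Both land in the same bucket; find_cmd_in_table() then disambiguates by
 * comparing the header against each descriptor's full cmd.mask/cmd.value
 * pair.
 */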
static int init_hash_table(struct intel_engine_cs *ring,
                           const struct drm_i915_cmd_table *cmd_tables,
                           int cmd_table_count)
{
        int i, j;

        hash_init(ring->cmd_hash);

        for (i = 0; i < cmd_table_count; i++) {
                const struct drm_i915_cmd_table *table = &cmd_tables[i];

                for (j = 0; j < table->count; j++) {
                        const struct drm_i915_cmd_descriptor *desc =
                                &table->table[j];
                        struct cmd_node *desc_node =
                                kmalloc(sizeof(*desc_node), GFP_KERNEL);

                        if (!desc_node)
                                return -ENOMEM;

                        desc_node->desc = desc;
                        hash_add(ring->cmd_hash, &desc_node->node,
                                 desc->cmd.value & CMD_HASH_MASK);
                }
        }

        return 0;
}

static void fini_hash_table(struct intel_engine_cs *ring)
{
        struct hlist_node *tmp;
        struct cmd_node *desc_node;
        int i;

        hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
                hash_del(&desc_node->node);
                kfree(desc_node);
        }
}

/**
 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
 * @ring: the ringbuffer to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_engine_cs based on whether the platform requires software
 * command parsing.
 *
 * Return: non-zero if initialization fails
 */
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
        const struct drm_i915_cmd_table *cmd_tables;
        int cmd_table_count;
        int ret;

        if (!IS_GEN7(ring->dev))
                return 0;

        switch (ring->id) {
        case RCS:
                if (IS_HASWELL(ring->dev)) {
                        cmd_tables = hsw_render_ring_cmds;
                        cmd_table_count =
                                ARRAY_SIZE(hsw_render_ring_cmds);
                } else {
                        cmd_tables = gen7_render_cmds;
                        cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
                }

                ring->reg_table = gen7_render_regs;
                ring->reg_count = ARRAY_SIZE(gen7_render_regs);

                if (IS_HASWELL(ring->dev)) {
                        ring->master_reg_table = hsw_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
                } else {
                        ring->master_reg_table = ivb_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
                }

                ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
                break;
        case VCS:
                cmd_tables = gen7_video_cmds;
                cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
                ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        case BCS:
                if (IS_HASWELL(ring->dev)) {
                        cmd_tables = hsw_blt_ring_cmds;
                        cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
                } else {
                        cmd_tables = gen7_blt_cmds;
                        cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
                }

                ring->reg_table = gen7_blt_regs;
                ring->reg_count = ARRAY_SIZE(gen7_blt_regs);

                if (IS_HASWELL(ring->dev)) {
                        ring->master_reg_table = hsw_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
                } else {
                        ring->master_reg_table = ivb_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
                }

                ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
                break;
        case VECS:
                cmd_tables = hsw_vebox_cmds;
                cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
                /* VECS can use the same length_mask function as VCS */
                ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        default:
                DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
                          ring->id);
                BUG();
        }

        BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
        BUG_ON(!validate_regs_sorted(ring));

        WARN_ON(!hash_empty(ring->cmd_hash));

        ret = init_hash_table(ring, cmd_tables, cmd_table_count);
        if (ret) {
                DRM_ERROR("CMD: cmd_parser_init failed!\n");
                fini_hash_table(ring);
                return ret;
        }

        ring->needs_cmd_parser = true;

        return 0;
}

/**
 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
 * @ring: the ringbuffer to clean up
 *
 * Releases any resources related to command parsing that may have been
 * initialized for the specified ring.
 */
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
{
        if (!ring->needs_cmd_parser)
                return;

        fini_hash_table(ring);
}

static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_engine_cs *ring,
                  u32 cmd_header)
{
        struct cmd_node *desc_node;

        hash_for_each_possible(ring->cmd_hash, desc_node, node,
                               cmd_header & CMD_HASH_MASK) {
                const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
                u32 masked_cmd = desc->cmd.mask & cmd_header;
                u32 masked_value = desc->cmd.value & desc->cmd.mask;

                if (masked_cmd == masked_value)
                        return desc;
        }

        return NULL;
}

/*
 * Returns a pointer to a descriptor for the command specified by cmd_header.
 *
 * The caller must supply space for a default descriptor via the default_desc
 * parameter. If no descriptor for the specified command exists in the ring's
 * command parser tables, this function fills in default_desc based on the
 * ring's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *ring,
         u32 cmd_header,
         struct drm_i915_cmd_descriptor *default_desc)
{
        const struct drm_i915_cmd_descriptor *desc;
        u32 mask;

        desc = find_cmd_in_table(ring, cmd_header);
        if (desc)
                return desc;

        mask = ring->get_cmd_length_mask(cmd_header);
        if (!mask)
                return NULL;

        BUG_ON(!default_desc);
        default_desc->flags = CMD_DESC_SKIP;
        default_desc->length.mask = mask;

        return default_desc;
}

static const struct drm_i915_reg_descriptor *
find_reg(const struct drm_i915_reg_descriptor *table,
         int count, u32 addr)
{
        if (table) {
                int i;

                for (i = 0; i < count; i++) {
                        if (i915_mmio_reg_offset(table[i].addr) == addr)
                                return &table[i];
                }
        }

        return NULL;
}

static u32 *vmap_batch(struct drm_i915_gem_object *obj,
                       unsigned start, unsigned len)
{
        int i;
        void *addr = NULL;
        struct sg_page_iter sg_iter;
        int first_page = start >> PAGE_SHIFT;
        int last_page = (len + start + 4095) >> PAGE_SHIFT;
        int npages = last_page - first_page;
        struct page **pages;

        pages = drm_malloc_ab(npages, sizeof(*pages));
        if (pages == NULL) {
                DRM_DEBUG_DRIVER("Failed to get space for pages\n");
                goto finish;
        }

        i = 0;
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
                pages[i++] = sg_page_iter_page(&sg_iter);
                if (i == npages)
                        break;
        }

        addr = vmap(pages, i, 0, PAGE_KERNEL);
        if (addr == NULL) {
                DRM_DEBUG_DRIVER("Failed to vmap pages\n");
                goto finish;
        }

finish:
        if (pages)
                drm_free_large(pages);
        return (u32*)addr;
}

/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
                       struct drm_i915_gem_object *src_obj,
                       u32 batch_start_offset,
                       u32 batch_len)
{
        int needs_clflush = 0;
        void *src_base, *src;
        void *dst = NULL;
        int ret;

        if (batch_len > dest_obj->base.size ||
            batch_len + batch_start_offset > src_obj->base.size)
                return ERR_PTR(-E2BIG);

        if (WARN_ON(dest_obj->pages_pin_count == 0))
                return ERR_PTR(-ENODEV);

        ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
        if (ret) {
                DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
                return ERR_PTR(ret);
        }

        src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
        if (!src_base) {
                DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
                ret = -ENOMEM;
                goto unpin_src;
        }

        ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
        if (ret) {
                DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
                goto unmap_src;
        }

        dst = vmap_batch(dest_obj, 0, batch_len);
        if (!dst) {
                DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
                ret = -ENOMEM;
                goto unmap_src;
        }

        src = src_base + offset_in_page(batch_start_offset);
        if (needs_clflush)
                drm_clflush_virt_range(src, batch_len);

        memcpy(dst, src, batch_len);

unmap_src:
        vunmap(src_base);
unpin_src:
        i915_gem_object_unpin_pages(src_obj);

        return ret ? ERR_PTR(ret) : dst;
}

/**
 * i915_needs_cmd_parser() - should a given ring use software command parsing?
 * @ring: the ring in question
 *
 * Only certain platforms require software batch buffer command parsing, and
 * only when enabled via module parameter.
 *
 * Return: true if the ring requires software command parsing
 */
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
        if (!ring->needs_cmd_parser)
                return false;

        if (!USES_PPGTT(ring->dev))
                return false;

        return (i915.enable_cmd_parser == 1);
}

static bool check_cmd(const struct intel_engine_cs *ring,
                      const struct drm_i915_cmd_descriptor *desc,
                      const u32 *cmd, u32 length,
                      const bool is_master,
                      bool *oacontrol_set)
{
        if (desc->flags & CMD_DESC_REJECT) {
                DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
                return false;
        }

        if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
                DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
                                 *cmd);
                return false;
        }

        if (desc->flags & CMD_DESC_REGISTER) {
                /*
                 * Get the distance between individual register offset
                 * fields if the command can perform more than one
                 * access at a time.
                 */
                const u32 step = desc->reg.step ? desc->reg.step : length;
                u32 offset;

                for (offset = desc->reg.offset; offset < length;
                     offset += step) {
                        const u32 reg_addr = cmd[offset] & desc->reg.mask;
                        const struct drm_i915_reg_descriptor *reg =
                                find_reg(ring->reg_table, ring->reg_count,
                                         reg_addr);

                        if (!reg && is_master)
                                reg = find_reg(ring->master_reg_table,
                                               ring->master_reg_count,
                                               reg_addr);

                        if (!reg) {
                                DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
                                                 reg_addr, *cmd, ring->id);
                                return false;
                        }

                        /*
                         * OACONTROL requires some special handling for
                         * writes. We want to make sure that any batch which
                         * enables OA also disables it before the end of the
                         * batch. The goal is to prevent one process from
                         * snooping on the perf data from another process. To do
                         * that, we need to check the value that will be written
                         * to the register. Hence, limit OACONTROL writes to
                         * only MI_LOAD_REGISTER_IMM commands.
                         */
                        if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
                                if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
                                        return false;
                                }

                                if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
                                        *oacontrol_set = (cmd[offset + 1] != 0);
                        }

                        /*
                         * Check the value written to the register against the
                         * allowed mask/value pair given in the whitelist entry.
                         */
                        if (reg->mask) {
                                if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
                                                         reg_addr);
                                        return false;
                                }

                                if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
                                    (offset + 2 > length ||
                                     (cmd[offset + 1] & reg->mask) != reg->value)) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
                                                         reg_addr);
                                        return false;
                                }
                        }
                }
        }

        if (desc->flags & CMD_DESC_BITMASK) {
                int i;

                for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
                        u32 dword;

                        if (desc->bits[i].mask == 0)
                                break;

                        if (desc->bits[i].condition_mask != 0) {
                                u32 offset =
                                        desc->bits[i].condition_offset;
                                u32 condition = cmd[offset] &
                                        desc->bits[i].condition_mask;

                                if (condition == 0)
                                        continue;
                        }

                        dword = cmd[desc->bits[i].offset] &
                                desc->bits[i].mask;

                        if (dword != desc->bits[i].expected) {
                                DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
                                                 *cmd,
                                                 desc->bits[i].mask,
                                                 desc->bits[i].expected,
                                                 dword, ring->id);
                                return false;
                        }
                }
        }

        return true;
}

#define LENGTH_BIAS 2
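
/*
 * Illustrative note (added for this write-up, not part of the driver): for a
 * command without CMD_DESC_FIXED, i915_parse_cmds() decodes the dword count
 * from the header as (header & length.mask) + LENGTH_BIAS. For example, a
 * hypothetical MI header of 0x18800002 with a 0x3F length mask decodes to
 * (0x02 + 2) = 4 dwords, counting the header itself, which is how far the
 * parser advances before examining the next command.
 */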
/**
 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
 * @ring: the ring on which the batch is to execute
 * @batch_obj: the batch buffer in question
 * @shadow_batch_obj: copy of the batch buffer in question
 * @batch_start_offset: byte offset in the batch at which execution starts
 * @batch_len: length of the commands in batch_obj
 * @is_master: is the submitting process the drm master?
 *
 * Parses the specified batch buffer looking for privilege violations as
 * described in the overview.
 *
 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
 * if the batch appears legal but should use hardware parsing
 */
int i915_parse_cmds(struct intel_engine_cs *ring,
                    struct drm_i915_gem_object *batch_obj,
                    struct drm_i915_gem_object *shadow_batch_obj,
                    u32 batch_start_offset,
                    u32 batch_len,
                    bool is_master)
{
        u32 *cmd, *batch_base, *batch_end;
        struct drm_i915_cmd_descriptor default_desc = { 0 };
        bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
        int ret = 0;

        batch_base = copy_batch(shadow_batch_obj, batch_obj,
                                batch_start_offset, batch_len);
        if (IS_ERR(batch_base)) {
                DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
                return PTR_ERR(batch_base);
        }

        /*
         * We use the batch length as size because the shadow object is as
         * large or larger and copy_batch() will write MI_NOPs to the extra
         * space. Parsing should be faster in some cases this way.
         */
        batch_end = batch_base + (batch_len / sizeof(*batch_end));

        cmd = batch_base;
        while (cmd < batch_end) {
                const struct drm_i915_cmd_descriptor *desc;
                u32 length;

                if (*cmd == MI_BATCH_BUFFER_END)
                        break;

                desc = find_cmd(ring, *cmd, &default_desc);
                if (!desc) {
                        DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
                                         *cmd);
                        ret = -EINVAL;
                        break;
                }

                /*
                 * If the batch buffer contains a chained batch, return an
                 * error that tells the caller to abort and dispatch the
                 * workload as a non-secure batch.
                 */
                if (desc->cmd.value == MI_BATCH_BUFFER_START) {
                        ret = -EACCES;
                        break;
                }

                if (desc->flags & CMD_DESC_FIXED)
                        length = desc->length.fixed;
                else
                        length = ((*cmd & desc->length.mask) + LENGTH_BIAS);

                if ((batch_end - cmd) < length) {
                        DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
                                         *cmd,
                                         length,
                                         batch_end - cmd);
                        ret = -EINVAL;
                        break;
                }

                if (!check_cmd(ring, desc, cmd, length, is_master,
                               &oacontrol_set)) {
                        ret = -EINVAL;
                        break;
                }

                cmd += length;
        }

        if (oacontrol_set) {
                DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
                ret = -EINVAL;
        }

        if (cmd >= batch_end) {
                DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
                ret = -EINVAL;
        }

        vunmap(batch_base);

        return ret;
}

/**
 * i915_cmd_parser_get_version() - get the cmd parser version number
 *
 * The cmd parser maintains a simple increasing integer version number suitable
 * for passing to userspace clients to determine what operations are permitted.
 *
 * Return: the current version number of the cmd parser
 */
int i915_cmd_parser_get_version(void)
{
        /*
         * Command parser version history
         *
         * 1. Initial version. Checks batches and reports violations, but leaves
         *    hardware parsing enabled (so does not allow new use cases).
         * 2. Allow access to the MI_PREDICATE_SRC0 and
         *    MI_PREDICATE_SRC1 registers.
         * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
         * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
         * 5. GPGPU dispatch compute indirect registers.
         */
        return 5;
}