/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
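
/*
 * Reserve a node within the stolen memory allocator, restricted to the
 * given range. The stolen_lock is taken internally, so callers need no
 * extra locking.
 */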
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}
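
/* As above, but searching the whole of the stolen area for a hole. */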
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}
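
/*
 * Sanitize the BIOS-reported stolen region: trim away any overlap with
 * the GTT on old platforms that keep their page table inside stolen,
 * and claim the range in the kernel's resource tree so that conflicting
 * users are caught early.
 */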
static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev_priv))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
			DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev_priv)) {
			DRM_ERROR("conflict detected with stolen region: %pR\n",
				  dsm);
			return -EBUSY;
		}
	}

	return 0;
}

void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}
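
/*
 * The helpers below decode the per-platform *_STOLEN_RESERVED register
 * to find the portion of stolen memory (expected at the top of the
 * region) that firmware/hardware has reserved for its own use; the
 * driver must keep clear of it.
 */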
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
				CTG_STOLEN_RESERVED :
				ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
			 IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		/* fall through */
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = dev_priv->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}
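
/*
 * Probe the stolen region at driver load: adjust the BIOS-provided
 * range, subtract the platform's reserved portion from the top, and
 * initialise the drm_mm allocator over what remains. Returning 0 while
 * leaving the allocator uninitialised simply disables stolen memory.
 */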
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	dev_priv->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
		return 0;

	GEM_BUG_ON(dev_priv->dsm.start == 0);
	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);

	stolen_top = dev_priv->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(dev_priv)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(dev_priv))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(dev_priv,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(dev_priv))
			vlv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(dev_priv,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 11:
	default:
		icl_get_stolen_reserved(dev_priv, &reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	dev_priv->dsm_reserved =
		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &dev_priv->dsm_reserved, &dev_priv->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of
	 * stolen memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
			 (u64)resource_size(&dev_priv->dsm) >> 10,
			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);

	dev_priv->stolen_usable_size =
		resource_size(&dev_priv->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	__i915_gem_object_unpin_pages(obj);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
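
/*
 * Wrap a stolen-memory node in a GEM object. On failure the caller
 * retains ownership of @stolen and must free it.
 */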
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}
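
/*
 * Allocate a new GEM object backed by stolen memory. Returns NULL if
 * stolen is unavailable or no hole large enough for @size remains.
 */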
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			      resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}
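
/*
 * Create a GEM object for a firmware-preallocated range of stolen
 * memory (e.g. the BIOS framebuffer), reserving both the given stolen
 * offset and, unless gtt_offset is I915_GTT_OFFSET_NONE, the matching
 * GTT address.
 */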
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/*
	 * To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);

	list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);

	spin_lock(&dev_priv->mm.obj_lock);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	spin_unlock(&dev_priv->mm.obj_lock);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}