i915_gem_stolen.c

/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
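
/*
 * i915_gem_stolen_insert_node_in_range - allocate a range from stolen memory
 * @dev_priv: i915 device
 * @node: drm_mm node to fill with the allocation
 * @size: size of the allocation in bytes
 * @alignment: required alignment, or 0 for none
 * @start: lowest acceptable offset within stolen
 * @end: highest acceptable offset within stolen
 *
 * Allocations are serialised by mm.stolen_lock. Returns -ENODEV if the
 * stolen allocator has not been initialised (e.g. no stolen memory was
 * detected), otherwise the drm_mm error code from the insertion.
 */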
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
                                         struct drm_mm_node *node, u64 size,
                                         unsigned alignment, u64 start, u64 end)
{
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;

        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
                                          size, alignment, 0,
                                          start, end, DRM_MM_INSERT_BEST);
        mutex_unlock(&dev_priv->mm.stolen_lock);

        return ret;
}
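
/* Convenience wrapper: allocate anywhere within the stolen range. */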
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
                                struct drm_mm_node *node, u64 size,
                                unsigned alignment)
{
        return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
                                                    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
                                 struct drm_mm_node *node)
{
        mutex_lock(&dev_priv->mm.stolen_lock);
        drm_mm_remove_node(node);
        mutex_unlock(&dev_priv->mm.stolen_lock);
}
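
/*
 * i915_adjust_stolen - sanitise the raw stolen memory range
 *
 * Trims the detected stolen range so that it does not overlap the GTT
 * (which older BIOSes place inside stolen), then reserves the resulting
 * region so nothing else can claim the same physical addresses.
 */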
static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
                              struct resource *dsm)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct resource *r;

        if (dsm->start == 0 || dsm->end <= dsm->start)
                return -EINVAL;

        /*
         * TODO: We have yet to encounter the case where the GTT wasn't at the
         * end of stolen. With that assumption we could simplify this.
         */

        /* Make sure we don't clobber the GTT if it's within stolen memory */
        if (INTEL_GEN(dev_priv) <= 4 &&
            !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
                struct resource stolen[2] = {*dsm, *dsm};
                struct resource ggtt_res;
                resource_size_t ggtt_start;

                ggtt_start = I915_READ(PGTBL_CTL);
                if (IS_GEN4(dev_priv))
                        ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
                                     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
                        ggtt_start &= PGTBL_ADDRESS_LO_MASK;

                ggtt_res =
                        (struct resource) DEFINE_RES_MEM(ggtt_start,
                                                         ggtt_total_entries(ggtt) * 4);

                if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
                        stolen[0].end = ggtt_res.start;
                if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
                        stolen[1].start = ggtt_res.end;

                /* Pick the larger of the two chunks */
                if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
                        *dsm = stolen[0];
                else
                        *dsm = stolen[1];

                if (stolen[0].start != stolen[1].start ||
                    stolen[0].end != stolen[1].end) {
                        DRM_DEBUG_KMS("GTT within stolen memory at %pR\n", &ggtt_res);
                        DRM_DEBUG_KMS("Stolen memory adjusted to %pR\n", dsm);
                }
        }

        /*
         * Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
                                    resource_size(dsm),
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /*
                 * One more attempt but this time requesting region from
                 * start + 1, as we have seen that this resolves the region
                 * conflict with the PCI Bus.
                 * This is a BIOS w/a: Some BIOS wrap stolen in the root
                 * PCI bus, but have an off-by-one error. Hence retry the
                 * reservation starting from 1 instead of 0.
                 * There's also BIOS with off-by-one on the other end.
                 */
                r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
                                            resource_size(dsm) - 2,
                                            "Graphics Stolen Memory");
                /*
                 * GEN3 firmware likes to smash pci bridges into the stolen
                 * range. Apparently this works.
                 */
                if (r == NULL && !IS_GEN3(dev_priv)) {
                        DRM_ERROR("conflict detected with stolen region: %pR\n",
                                  dsm);
                        return -EBUSY;
                }
        }

        return 0;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;

        drm_mm_takedown(&dev_priv->mm.stolen);
}
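
/*
 * The *_get_stolen_reserved() helpers below decode the per-generation
 * "stolen reserved" registers into a base address and size. The reserved
 * portion of stolen is in use by the hardware/firmware and must be kept
 * out of our allocator.
 */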
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    resource_size_t *base, resource_size_t *size)
{
        uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
                                     CTG_STOLEN_RESERVED :
                                     ELK_STOLEN_RESERVED);
        resource_size_t stolen_top = dev_priv->dsm.end + 1;

        if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) {
                *base = 0;
                *size = 0;
                return;
        }

        /*
         * Whether ILK really reuses the ELK register for this is unclear.
         * Let's see if we catch anyone with this supposedly enabled on ILK.
         */
        WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);

        *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

        WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

        /*
         * On these platforms, the register doesn't have a size field, so the
         * size is the distance between the base and the top of the stolen
         * memory. We also have the genuine case where base is zero and
         * there's nothing reserved.
         */
        if (*base == 0)
                *size = 0;
        else
                *size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     resource_size_t *base, resource_size_t *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
                *base = 0;
                *size = 0;
                return;
        }

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
        case GEN6_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_512K:
                *size = 512 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        case GEN6_STOLEN_RESERVED_128K:
                *size = 128 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     resource_size_t *base, resource_size_t *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
                *base = 0;
                *size = 0;
                return;
        }

        *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
        case GEN7_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN7_STOLEN_RESERVED_256K:
                *size = 256 * 1024;
                break;
        default:
                *size = 1024 * 1024;
                MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    resource_size_t *base, resource_size_t *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

        if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
                *base = 0;
                *size = 0;
                return;
        }

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
        case GEN8_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_2M:
                *size = 2 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_4M:
                *size = 4 * 1024 * 1024;
                break;
        case GEN8_STOLEN_RESERVED_8M:
                *size = 8 * 1024 * 1024;
                break;
        default:
                *size = 8 * 1024 * 1024;
                MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
        }
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    resource_size_t *base, resource_size_t *size)
{
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
        resource_size_t stolen_top;

        if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
                *base = 0;
                *size = 0;
                return;
        }

        stolen_top = dev_priv->dsm.end + 1;

        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

        /*
         * On these platforms, the register doesn't have a size field, so the
         * size is the distance between the base and the top of the stolen
         * memory. We also have the genuine case where base is zero and
         * there's nothing reserved.
         */
        if (*base == 0)
                *size = 0;
        else
                *size = stolen_top - *base;
}
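
/*
 * i915_gem_init_stolen - set up the stolen memory allocator
 *
 * Takes the stolen range discovered by the early quirk code
 * (intel_graphics_stolen_res), trims it via i915_adjust_stolen(), carves
 * out the reserved portion at the top, and initialises a drm_mm allocator
 * over what remains. Stolen is skipped entirely under a vGPU, or on
 * pre-gen8 hardware when DMAR/VT-d is active.
 */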
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
        resource_size_t reserved_base, stolen_top;
        resource_size_t reserved_total, reserved_size;
        resource_size_t stolen_usable_start;

        mutex_init(&dev_priv->mm.stolen_lock);

        if (intel_vgpu_active(dev_priv)) {
                DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
                return 0;
        }

        if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
                DRM_INFO("DMAR active, disabling use of stolen memory\n");
                return 0;
        }

        if (resource_size(&intel_graphics_stolen_res) == 0)
                return 0;

        dev_priv->dsm = intel_graphics_stolen_res;

        if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
                return 0;

        GEM_BUG_ON(dev_priv->dsm.start == 0);
        GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);

        stolen_top = dev_priv->dsm.end + 1;
        reserved_base = 0;
        reserved_size = 0;

        switch (INTEL_GEN(dev_priv)) {
        case 2:
        case 3:
                break;
        case 4:
                if (!IS_G4X(dev_priv))
                        break;
                /* fall through */
        case 5:
                g4x_get_stolen_reserved(dev_priv,
                                        &reserved_base, &reserved_size);
                break;
        case 6:
                gen6_get_stolen_reserved(dev_priv,
                                         &reserved_base, &reserved_size);
                break;
        case 7:
                gen7_get_stolen_reserved(dev_priv,
                                         &reserved_base, &reserved_size);
                break;
        default:
                if (IS_LP(dev_priv))
                        chv_get_stolen_reserved(dev_priv,
                                                &reserved_base, &reserved_size);
                else
                        bdw_get_stolen_reserved(dev_priv,
                                                &reserved_base, &reserved_size);
                break;
        }

        /*
         * It is possible for the reserved base to be zero, but the register
         * field for size doesn't have a zero option.
         */
        if (reserved_base == 0) {
                reserved_size = 0;
                reserved_base = stolen_top;
        }

        dev_priv->dsm_reserved =
                (struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);

        if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
                DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
                          &dev_priv->dsm_reserved, &dev_priv->dsm);
                return 0;
        }

        /*
         * It is possible for the reserved area to end before the end of
         * stolen memory, so just consider the start.
         */
        reserved_total = stolen_top - reserved_base;

        DRM_DEBUG_KMS("Memory reserved for graphics device: %lluK, usable: %lluK\n",
                      (u64)resource_size(&dev_priv->dsm) >> 10,
                      ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);

        stolen_usable_start = 0;
        /* WaSkipStolenMemoryFirstPage:bdw+ */
        if (INTEL_GEN(dev_priv) >= 8)
                stolen_usable_start = 4096;

        dev_priv->stolen_usable_size =
                resource_size(&dev_priv->dsm) - reserved_total - stolen_usable_start;

        /* Basic memrange allocator for stolen space. */
        drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
                    dev_priv->stolen_usable_size);

        return 0;
}
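
/*
 * Build a single-entry sg_table whose dma address points directly at the
 * requested physical range within stolen; there are no struct pages
 * behind stolen memory.
 */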
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             resource_size_t offset, resource_size_t size)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct sg_table *st;
        struct scatterlist *sg;

        GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));

        /*
         * We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return ERR_PTR(-ENOMEM);

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return ERR_PTR(-ENOMEM);
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
        sg_dma_len(sg) = size;

        return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages =
                i915_pages_create_for_stolen(obj->base.dev,
                                             obj->stolen->start,
                                             obj->stolen->size);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        __i915_gem_object_set_pages(obj, pages, obj->stolen->size);

        return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        /* Should only be called from i915_gem_object_release_stolen() */
        sg_free_table(pages);
        kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

        GEM_BUG_ON(!stolen);

        __i915_gem_object_unpin_pages(obj);

        i915_gem_stolen_remove_node(dev_priv, stolen);
        kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
        .release = i915_gem_object_release_stolen,
};
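
/*
 * Wrap an already-allocated stolen drm_mm node in a GEM object. On
 * success the object owns the node; on failure the caller must remove
 * and free the node itself.
 */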
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
                               struct drm_mm_node *stolen)
{
        struct drm_i915_gem_object *obj;
        unsigned int cache_level;

        obj = i915_gem_object_alloc(dev_priv);
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

        obj->stolen = stolen;
        obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
        i915_gem_object_set_cache_coherency(obj, cache_level);

        if (i915_gem_object_pin_pages(obj))
                goto cleanup;

        return obj;

cleanup:
        i915_gem_object_free(obj);
        return NULL;
}
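
/*
 * i915_gem_object_create_stolen - allocate an object backed by stolen memory
 *
 * Allocates a page-aligned (4096) range from the stolen allocator and wraps
 * it in a GEM object. Returns NULL if stolen is unavailable or exhausted,
 * so callers are expected to fall back to ordinary shmem-backed objects.
 * As a hypothetical example of that pattern:
 *
 *      obj = i915_gem_object_create_stolen(dev_priv, size);
 *      if (!obj)
 *              obj = i915_gem_object_create(dev_priv, size);
 */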
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
                              resource_size_t size)
{
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        if (size == 0)
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
        if (ret) {
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev_priv, stolen);
        if (obj)
                return obj;

        i915_gem_stolen_remove_node(dev_priv, stolen);
        kfree(stolen);
        return NULL;
}
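
/*
 * i915_gem_object_create_stolen_for_preallocated - wrap a firmware-chosen
 * stolen range (e.g. the BIOS framebuffer) in a GEM object
 *
 * Unlike i915_gem_object_create_stolen(), the stolen offset is dictated by
 * the caller, so the exact node is reserved rather than allocated. If
 * @gtt_offset is not I915_GTT_OFFSET_NONE, the object's GGTT placement is
 * also reserved at a fixed address and the vma marked as globally bound.
 */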
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
                                               resource_size_t stolen_offset,
                                               resource_size_t gtt_offset,
                                               resource_size_t size)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
                      &stolen_offset, &gtt_offset, &size);

        /* KISS and expect everything to be page-aligned */
        if (WARN_ON(size == 0) ||
            WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
            WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        stolen->start = stolen_offset;
        stolen->size = size;
        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
        mutex_unlock(&dev_priv->mm.stolen_lock);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev_priv, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                i915_gem_stolen_remove_node(dev_priv, stolen);
                kfree(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        vma = i915_vma_instance(obj, &ggtt->base, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_pages;
        }

        /*
         * To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
                                   size, gtt_offset, obj->cache_level,
                                   0);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                goto err_pages;
        }

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

        vma->pages = obj->mm.pages;
        vma->flags |= I915_VMA_GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);

        list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);

        spin_lock(&dev_priv->mm.obj_lock);
        list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
        obj->bind_count++;
        spin_unlock(&dev_priv->mm.obj_lock);

        return obj;

err_pages:
        i915_gem_object_unpin_pages(obj);
err:
        i915_gem_object_put(obj);
        return NULL;
}