acr_r352.c

/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r352.h"
#include "hs_ucode.h"

#include <core/gpuobj.h>
#include <core/firmware.h>
#include <engine/falcon.h>
#include <subdev/pmu.h>
#include <core/msgqueue.h>
#include <engine/sec2.h>

/**
 * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
 * @signature: 16B signature for secure code. 0s if no secure code
 * @ctx_dma: DMA context to be used by BL while loading code/data
 * @code_dma_base: 256B-aligned physical FB address where code is located
 *	(falcon's $xcbase register)
 * @non_sec_code_off: offset from code_dma_base where the non-secure code is
 *	located. The offset must be a multiple of 256 to help perf
 * @non_sec_code_size: the size of the non-secure code part
 * @sec_code_off: offset from code_dma_base where the secure code is
 *	located. The offset must be a multiple of 256 to help perf
 * @sec_code_size: the size of the secure code part
 * @code_entry_point: code entry point which will be invoked by BL after
 *	code is loaded
 * @data_dma_base: 256B-aligned physical FB address where data is located
 *	(falcon's $xdbase register)
 * @data_size: size of data block. Should be a multiple of 256B
 *
 * Structure used by the bootloader to load the rest of the code. This has
 * to be filled by host and copied into DMEM at offset provided in the
 * hsflcn_bl_desc.bl_desc_dmem_load_off.
 */
struct acr_r352_flcn_bl_desc {
	u32 reserved[4];
	u32 signature[4];
	u32 ctx_dma;
	u32 code_dma_base;
	u32 non_sec_code_off;
	u32 non_sec_code_size;
	u32 sec_code_off;
	u32 sec_code_size;
	u32 code_entry_point;
	u32 data_dma_base;
	u32 data_size;
	u32 code_dma_base1;
	u32 data_dma_base1;
};
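
/*
 * The DMA base fields hold addresses in 256-byte units, split across two
 * words: the byte address of the code is
 * ((u64)code_dma_base1 << 32 | code_dma_base) << 8, and likewise for
 * data_dma_base/data_dma_base1, as filled in by
 * acr_r352_generate_flcn_bl_desc() below.
 */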

/**
 * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image
 */
static void
acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
			       const struct ls_ucode_img *img, u64 wpr_addr,
			       void *_desc)
{
	struct acr_r352_flcn_bl_desc *desc = _desc;
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	u64 base, addr_code, addr_data;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
	addr_data = (base + pdesc->app_resident_data_offset) >> 8;

	desc->ctx_dma = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = lower_32_bits(addr_code);
	desc->code_dma_base1 = upper_32_bits(addr_code);
	desc->non_sec_code_off = pdesc->app_resident_code_offset;
	desc->non_sec_code_size = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = lower_32_bits(addr_data);
	desc->data_dma_base1 = upper_32_bits(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
}

/**
 * struct hsflcn_acr_desc - data section of the HS firmware
 *
 * This header is to be copied at the beginning of DMEM by the HS bootloader.
 *
 * @signature: signature of ACR ucode
 * @wpr_region_id: region ID holding the WPR header and its details
 * @wpr_offset: offset from the WPR region holding the wpr header
 * @regions: region descriptors
 * @nonwpr_ucode_blob_size: size of the LS blob
 * @nonwpr_ucode_blob_start: FB location of the LS blob
 */
struct hsflcn_acr_desc {
	union {
		u8 reserved_dmem[0x200];
		u32 signatures[4];
	} ucode_reserved_space;
	u32 wpr_region_id;
	u32 wpr_offset;
	u32 mmu_mem_range;
#define FLCN_ACR_MAX_REGIONS 2
	struct {
		u32 no_regions;
		struct {
			u32 start_addr;
			u32 end_addr;
			u32 region_id;
			u32 read_mask;
			u32 write_mask;
			u32 client_mask;
		} region_props[FLCN_ACR_MAX_REGIONS];
	} regions;
	u32 ucode_blob_size;
	u64 ucode_blob_base __aligned(8);
	struct {
		u32 vpr_enabled;
		u32 vpr_start;
		u32 vpr_end;
		u32 hdcp_policies;
	} vpr_desc;
};
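
/*
 * The load HS firmware embeds an instance of this descriptor in its data
 * section; acr_r352_fixup_hs_desc() below patches the region/blob fields of
 * that instance before the HS image is copied into its GPU object.
 */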

/*
 * Low-secure blob creation
 */

/**
 * struct acr_r352_lsf_lsb_header - LS firmware header
 * @signature: signature to verify the firmware against
 * @ucode_off: offset of the ucode blob in the WPR region. The ucode
 *	blob contains the bootloader, code and data of the
 *	LS falcon
 * @ucode_size: size of the ucode blob, including bootloader
 * @data_size: size of the ucode blob data
 * @bl_code_size: size of the bootloader code
 * @bl_imem_off: offset in imem of the bootloader
 * @bl_data_off: offset of the bootloader data in WPR region
 * @bl_data_size: size of the bootloader data
 * @app_code_off: offset of the app code relative to ucode_off
 * @app_code_size: size of the app code
 * @app_data_off: offset of the app data relative to ucode_off
 * @app_data_size: size of the app data
 * @flags: flags for the secure bootloader
 *
 * This structure is written into the WPR region for each managed falcon. Each
 * instance is referenced by the lsb_offset member of the corresponding
 * lsf_wpr_header.
 */
struct acr_r352_lsf_lsb_header {
	/**
	 * LS falcon signatures
	 * @prd_keys: signature to use in production mode
	 * @dbg_keys: signature to use in debug mode
	 * @b_prd_present: whether the production key is present
	 * @b_dbg_present: whether the debug key is present
	 * @falcon_id: ID of the falcon the ucode applies to
	 */
	struct {
		u8 prd_keys[2][16];
		u8 dbg_keys[2][16];
		u32 b_prd_present;
		u32 b_dbg_present;
		u32 falcon_id;
	} signature;
	u32 ucode_off;
	u32 ucode_size;
	u32 data_size;
	u32 bl_code_size;
	u32 bl_imem_off;
	u32 bl_data_off;
	u32 bl_data_size;
	u32 app_code_off;
	u32 app_code_size;
	u32 app_data_off;
	u32 app_data_size;
	u32 flags;
};

/**
 * struct acr_r352_lsf_wpr_header - LS blob WPR Header
 * @falcon_id: LS falcon ID
 * @lsb_offset: offset of the lsb_lsf_header in the WPR region
 * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon
 * @lazy_bootstrap: skip bootstrapping by ACR
 * @status: bootstrapping status
 *
 * An array of these is written at the beginning of the WPR region, one for
 * each managed falcon. The array is terminated by an instance whose falcon_id
 * is LSF_FALCON_ID_INVALID.
 */
struct acr_r352_lsf_wpr_header {
	u32 falcon_id;
	u32 lsb_offset;
	u32 bootstrap_owner;
	u32 lazy_bootstrap;
	u32 status;
#define LSF_IMAGE_STATUS_NONE				0
#define LSF_IMAGE_STATUS_COPY				1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED		2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED		3
#define LSF_IMAGE_STATUS_VALIDATION_DONE		4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED		5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY		6
};
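
/*
 * In practice, acr_r352_ls_write_wpr() below terminates the array by writing
 * NVKM_SECBOOT_FALCON_INVALID into the falcon_id field of the entry that
 * follows the last managed falcon.
 */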

/**
 * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
 */
struct ls_ucode_img_r352 {
	struct ls_ucode_img base;

	struct acr_r352_lsf_wpr_header wpr_header;
	struct acr_r352_lsf_lsb_header lsb_header;
};

#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)

/**
 * acr_r352_ls_ucode_img_load() - create a lsf_ucode_img and load it
 */
struct ls_ucode_img *
acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
			   const struct nvkm_secboot *sb,
			   enum nvkm_secboot_falcon falcon_id)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	struct ls_ucode_img_r352 *img;
	int ret;

	img = kzalloc(sizeof(*img), GFP_KERNEL);
	if (!img)
		return ERR_PTR(-ENOMEM);

	img->base.falcon_id = falcon_id;

	ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
	if (ret) {
		kfree(img->base.ucode_data);
		kfree(img->base.sig);
		kfree(img);
		return ERR_PTR(ret);
	}

	/* Check that the signature size matches our expectations... */
	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
		nvkm_error(subdev, "invalid signature size for %s falcon!\n",
			   nvkm_secboot_falcon_name[falcon_id]);
		return ERR_PTR(-EINVAL);
	}

	/* Copy signature to the right place */
	memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);

	/* not needed? the signature should already have the right value */
	img->lsb_header.signature.falcon_id = falcon_id;

	return &img->base;
}

#define LSF_LSB_HEADER_ALIGN 256
#define LSF_BL_DATA_ALIGN 256
#define LSF_BL_DATA_SIZE_ALIGN 256
#define LSF_BL_CODE_SIZE_ALIGN 256
#define LSF_UCODE_DATA_ALIGN 4096

/**
 * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image
 * @acr: ACR to use
 * @img: image to generate for
 * @offset: offset in the WPR region where this image starts
 *
 * Allocate space in the WPR area from offset and write the WPR and LSB headers
 * accordingly.
 *
 * Return: offset at the end of this image.
 */
static u32
acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
			     struct ls_ucode_img_r352 *img, u32 offset)
{
	struct ls_ucode_img *_img = &img->base;
	struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
	struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
	const struct acr_r352_ls_func *func =
					    acr->func->ls_func[_img->falcon_id];

	/* Fill WPR header */
	whdr->falcon_id = _img->falcon_id;
	whdr->bootstrap_owner = acr->base.boot_falcon;
	whdr->status = LSF_IMAGE_STATUS_COPY;

	/* Skip bootstrapping falcons started by someone other than ACR */
	if (acr->lazy_bootstrap & BIT(_img->falcon_id))
		whdr->lazy_bootstrap = 1;

	/* Align, save off, and include an LSB header size */
	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
	whdr->lsb_offset = offset;
	offset += sizeof(*lhdr);

	/*
	 * Align, save off, and include the original (static) ucode
	 * image size
	 */
	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
	_img->ucode_off = lhdr->ucode_off = offset;
	offset += _img->ucode_size;

	/*
	 * For falcons that use a boot loader (BL), we append a loader
	 * desc structure on the end of the ucode image and consider
	 * this the boot loader data. The host will then copy the loader
	 * desc args to this space within the WPR region (before locking
	 * down) and the HS bin will then copy them to DMEM 0 for the
	 * loader.
	 */
	lhdr->bl_code_size = ALIGN(desc->bootloader_size,
				   LSF_BL_CODE_SIZE_ALIGN);
	lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
				 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
	lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
			  lhdr->bl_code_size - lhdr->ucode_size;
	/*
	 * Though the BL is located at 0th offset of the image, the VA
	 * is different to make sure that it doesn't collide with the
	 * actual OS VA range
	 */
	lhdr->bl_imem_off = desc->bootloader_imem_offset;
	lhdr->app_code_off = desc->app_start_offset +
			     desc->app_resident_code_offset;
	lhdr->app_code_size = desc->app_resident_code_size;
	lhdr->app_data_off = desc->app_start_offset +
			     desc->app_resident_data_offset;
	lhdr->app_data_size = desc->app_resident_data_size;

	lhdr->flags = func->lhdr_flags;
	if (_img->falcon_id == acr->base.boot_falcon)
		lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;

	/* Align and save off BL descriptor size */
	lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);

	/*
	 * Align, save off, and include the additional BL data
	 */
	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
	lhdr->bl_data_off = offset;
	offset += lhdr->bl_data_size;

	return offset;
}

/**
 * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images
 */
int
acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
{
	struct ls_ucode_img_r352 *img;
	struct list_head *l;
	u32 count = 0;
	u32 offset;

	/* Count the number of images to manage */
	list_for_each(l, imgs)
		count++;

	/*
	 * Start with an array of WPR headers at the base of the WPR.
	 * The expectation here is that the secure falcon will do a single DMA
	 * read of this array and cache it internally so it's ok to pack these.
	 * Also, we add 1 to the falcon count to indicate the end of the array.
	 */
	offset = sizeof(img->wpr_header) * (count + 1);

	/*
	 * Walk the managed falcons, accounting for the LSB structs
	 * as well as the ucode images.
	 */
	list_for_each_entry(img, imgs, base.node) {
		offset = acr_r352_ls_img_fill_headers(acr, img, offset);
	}

	return offset;
}
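
/*
 * Resulting WPR image layout, as computed by the two functions above
 * (alignments per the LSF_*_ALIGN constants):
 *
 *   +-------------------------------+ 0
 *   | WPR header array              |  one entry per managed falcon, plus
 *   |                               |  a terminator with an invalid falcon_id
 *   +-------------------------------+ 256B-aligned
 *   | LSB header (falcon 0)         |
 *   +-------------------------------+ 4096B-aligned
 *   | ucode image (falcon 0)        |  bootloader + code + data
 *   +-------------------------------+ 256B-aligned
 *   | BL descriptor data (falcon 0) |
 *   +-------------------------------+
 *   | ... repeated per falcon ...   |
 *   +-------------------------------+
 */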

/**
 * acr_r352_ls_write_wpr - write the WPR blob contents
 */
int
acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
		      struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
	struct ls_ucode_img *_img;
	u32 pos = 0;

	nvkm_kmap(wpr_blob);

	list_for_each_entry(_img, imgs, node) {
		struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
		const struct acr_r352_ls_func *ls_func =
					    acr->func->ls_func[_img->falcon_id];
		u8 gdesc[ls_func->bl_desc_size];

		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
				      sizeof(img->wpr_header));

		nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
				      &img->lsb_header,
				      sizeof(img->lsb_header));

		/* Generate and write BL descriptor */
		memset(gdesc, 0, ls_func->bl_desc_size);
		ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);

		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
				      gdesc, ls_func->bl_desc_size);

		/* Copy ucode */
		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
				      _img->ucode_data, _img->ucode_size);

		pos += sizeof(img->wpr_header);
	}

	nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);

	nvkm_done(wpr_blob);

	return 0;
}
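
/*
 * Note that pos only walks the WPR header array at the start of the blob;
 * the LSB header, BL descriptor and ucode of each image are written at the
 * absolute offsets previously computed by acr_r352_ls_img_fill_headers().
 */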

/* Both size and address of WPR need to be 256K-aligned */
#define WPR_ALIGNMENT 0x40000

/**
 * acr_r352_prepare_ls_blob() - prepare the LS blob
 *
 * For each securely managed falcon, load the FW, signatures and bootloaders
 * and prepare a ucode blob. Then, compute the offsets in the WPR region for
 * each blob, and finally write the headers and ucode blobs into a GPU object
 * that will be copied into the WPR region by the HS firmware.
 */
static int
acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	struct list_head imgs;
	struct ls_ucode_img *img, *t;
	unsigned long managed_falcons = acr->base.managed_falcons;
	u64 wpr_addr = sb->wpr_addr;
	u32 wpr_size = sb->wpr_size;
	int managed_count = 0;
	u32 image_wpr_size, ls_blob_size;
	int falcon_id;
	int ret;

	INIT_LIST_HEAD(&imgs);

	/* Load all LS blobs */
	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		struct ls_ucode_img *img;

		img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
		if (IS_ERR(img)) {
			if (acr->base.optional_falcons & BIT(falcon_id)) {
				managed_falcons &= ~BIT(falcon_id);
				nvkm_info(subdev, "skipping %s falcon...\n",
					  nvkm_secboot_falcon_name[falcon_id]);
				continue;
			}
			ret = PTR_ERR(img);
			goto cleanup;
		}

		list_add_tail(&img->node, &imgs);
		managed_count++;
	}

	/* Commit the actual list of falcons we will manage from now on */
	acr->base.managed_falcons = managed_falcons;

	/*
	 * If the boot falcon has a firmware, let it manage the bootstrap of
	 * other falcons.
	 */
	if (acr->func->ls_func[acr->base.boot_falcon] &&
	    (managed_falcons & BIT(acr->base.boot_falcon))) {
		for_each_set_bit(falcon_id, &managed_falcons,
				 NVKM_SECBOOT_FALCON_END) {
			if (falcon_id == acr->base.boot_falcon)
				continue;

			acr->lazy_bootstrap |= BIT(falcon_id);
		}
	}

	/*
	 * Fill the WPR and LSF headers with the right offsets and compute
	 * required WPR size
	 */
	image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
	image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);

	ls_blob_size = image_wpr_size;

	/*
	 * If we need a shadow area, allocate twice the size and use the
	 * upper half as WPR
	 */
	if (wpr_size == 0 && acr->func->shadow_blob)
		ls_blob_size *= 2;

	/* Allocate GPU object that will contain the WPR region */
	ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
			      false, NULL, &acr->ls_blob);
	if (ret)
		goto cleanup;

	nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n",
		   managed_count, image_wpr_size);

	/* If WPR address and size are not fixed, set them to fit the LS blob */
	if (wpr_size == 0) {
		wpr_addr = acr->ls_blob->addr;
		if (acr->func->shadow_blob)
			wpr_addr += acr->ls_blob->size / 2;

		wpr_size = image_wpr_size;
	/*
	 * But if the WPR region is set by the bootloader, it is illegal for
	 * the LS blob to be larger than this region.
	 */
	} else if (image_wpr_size > wpr_size) {
		nvkm_error(subdev, "WPR region too small for FW blob!\n");
		nvkm_error(subdev, "required: %dB\n", image_wpr_size);
		nvkm_error(subdev, "available: %dB\n", wpr_size);
		ret = -ENOSPC;
		goto cleanup;
	}

	/* Write LS blob */
	ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr);
	if (ret)
		nvkm_gpuobj_del(&acr->ls_blob);

cleanup:
	list_for_each_entry_safe(img, t, &imgs, node) {
		kfree(img->ucode_data);
		kfree(img->sig);
		kfree(img);
	}

	return ret;
}

void
acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
		       void *_desc)
{
	struct hsflcn_acr_desc *desc = _desc;
	struct nvkm_gpuobj *ls_blob = acr->ls_blob;

	/* WPR region information if WPR is not fixed */
	if (sb->wpr_size == 0) {
		u64 wpr_start = ls_blob->addr;
		u64 wpr_end = wpr_start + ls_blob->size;

		desc->wpr_region_id = 1;
		desc->regions.no_regions = 2;
		desc->regions.region_props[0].start_addr = wpr_start >> 8;
		desc->regions.region_props[0].end_addr = wpr_end >> 8;
		desc->regions.region_props[0].region_id = 1;
		desc->regions.region_props[0].read_mask = 0xf;
		desc->regions.region_props[0].write_mask = 0xc;
		desc->regions.region_props[0].client_mask = 0x2;
	} else {
		desc->ucode_blob_base = ls_blob->addr;
		desc->ucode_blob_size = ls_blob->size;
	}
}
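
/*
 * The mask values above are passed through verbatim to the signed ACR
 * firmware; they presumably allow reads from all privilege levels (0xf)
 * while restricting writes (0xc) and clients (0x2) to the secure ones, but
 * the exact encoding is defined by the ACR firmware rather than this driver.
 */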

static void
acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
			     u64 offset)
{
	struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc;
	u64 addr_code, addr_data;

	addr_code = offset >> 8;
	addr_data = (offset + hdr->data_dma_base) >> 8;

	bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
	bl_desc->code_dma_base = lower_32_bits(addr_code);
	bl_desc->non_sec_code_off = hdr->non_sec_code_off;
	bl_desc->non_sec_code_size = hdr->non_sec_code_size;
	bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
	bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
	bl_desc->code_entry_point = 0;
	bl_desc->data_dma_base = lower_32_bits(addr_data);
	bl_desc->data_size = hdr->data_size;
}

/**
 * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor
 *
 * @sb: secure boot instance to prepare for
 * @fw: name of the HS firmware to load
 * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload
 * @load_header: pointer to the load header to fill for this firmware
 * @patch: whether we should patch the HS descriptor (only for HS loaders)
 */
static int
acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
			 const char *fw, struct nvkm_gpuobj **blob,
			 struct hsf_load_header *load_header, bool patch)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	void *acr_image;
	struct fw_bin_header *hsbin_hdr;
	struct hsf_fw_header *fw_hdr;
	struct hsf_load_header *load_hdr;
	void *acr_data;
	int ret;

	acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
	if (IS_ERR(acr_image))
		return PTR_ERR(acr_image);

	hsbin_hdr = acr_image;
	fw_hdr = acr_image + hsbin_hdr->header_offset;
	load_hdr = acr_image + fw_hdr->hdr_offset;
	acr_data = acr_image + hsbin_hdr->data_offset;

	/* Patch descriptor with WPR information? */
	if (patch) {
		struct hsflcn_acr_desc *desc;

		desc = acr_data + load_hdr->data_dma_base;
		acr->func->fixup_hs_desc(acr, sb, desc);
	}

	if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
		nvkm_error(subdev, "more apps (%d) than supported (%d)!",
			   load_hdr->num_apps, ACR_R352_MAX_APPS);
		ret = -EINVAL;
		goto cleanup;
	}
	memcpy(load_header, load_hdr, sizeof(*load_header) +
	       (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));

	/* Create ACR blob and copy HS data to it */
	ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
			      0x1000, false, NULL, blob);
	if (ret)
		goto cleanup;

	nvkm_kmap(*blob);
	nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
	nvkm_done(*blob);

cleanup:
	kfree(acr_image);

	return ret;
}
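
/*
 * The HS firmware image parsed above is laid out as follows:
 *
 *   fw_bin_header
 *   +-- header_offset --> hsf_fw_header
 *   |                     +-- hdr_offset --> hsf_load_header (+ app entries)
 *   +-- data_offset ----> ucode payload (code + data), with the
 *                         hsflcn_acr_desc located at data_dma_base within
 *                         the data section of the load firmware
 */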

/**
 * acr_r352_load_blobs - load blobs common to all ACR V1 versions.
 *
 * This includes the LS blob, HS ucode loading blob, and HS bootloader.
 *
 * The HS ucode unload blob is only used on dGPU if the WPR region is variable.
 */
int
acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	int ret;

	/* Firmware already loaded? */
	if (acr->firmware_ok)
		return 0;

	/* Load and prepare the managed falcon's firmwares */
	ret = acr_r352_prepare_ls_blob(acr, sb);
	if (ret)
		return ret;

	/* Load the HS firmware that will load the LS firmwares */
	if (!acr->load_blob) {
		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load",
					       &acr->load_blob,
					       &acr->load_bl_header, true);
		if (ret)
			return ret;
	}

	/* If the ACR region is dynamically programmed, we need an unload FW */
	if (sb->wpr_size == 0) {
		ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload",
					       &acr->unload_blob,
					       &acr->unload_bl_header, false);
		if (ret)
			return ret;
	}

	/* Load the HS firmware bootloader */
	if (!acr->hsbl_blob) {
		acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
		if (IS_ERR(acr->hsbl_blob)) {
			ret = PTR_ERR(acr->hsbl_blob);
			acr->hsbl_blob = NULL;
			return ret;
		}

		if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
			acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
							    "acr/unload_bl", 0);
			if (IS_ERR(acr->hsbl_unload_blob)) {
				ret = PTR_ERR(acr->hsbl_unload_blob);
				acr->hsbl_unload_blob = NULL;
				return ret;
			}
		} else {
			acr->hsbl_unload_blob = acr->hsbl_blob;
		}
	}

	acr->firmware_ok = true;
	nvkm_debug(&sb->subdev, "LS blob successfully created\n");

	return 0;
}

/**
 * acr_r352_load() - prepare HS falcon to run the specified blob, mapped
 * at the given offset.
 *
 * Returns the start address to use, or a negative error value.
 */
static int
acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
	      struct nvkm_gpuobj *blob, u64 offset)
{
	struct acr_r352 *acr = acr_r352(_acr);
	const u32 bl_desc_size = acr->func->hs_bl_desc_size;
	const struct hsf_load_header *load_hdr;
	struct fw_bin_header *bl_hdr;
	struct fw_bl_desc *hsbl_desc;
	void *bl, *blob_data, *hsbl_code, *hsbl_data;
	u32 code_size;
	u8 bl_desc[bl_desc_size];

	/* Find the bootloader descriptor for our blob and copy it */
	if (blob == acr->load_blob) {
		load_hdr = &acr->load_bl_header;
		bl = acr->hsbl_blob;
	} else if (blob == acr->unload_blob) {
		load_hdr = &acr->unload_bl_header;
		bl = acr->hsbl_unload_blob;
	} else {
		nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
		return -EINVAL;
	}

	bl_hdr = bl;
	hsbl_desc = bl + bl_hdr->header_offset;
	blob_data = bl + bl_hdr->data_offset;
	hsbl_code = blob_data + hsbl_desc->code_off;
	hsbl_data = blob_data + hsbl_desc->data_off;
	code_size = ALIGN(hsbl_desc->code_size, 256);

	/*
	 * Copy HS bootloader data
	 */
	nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0);

	/* Copy HS bootloader code to end of IMEM */
	nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size,
			      code_size, hsbl_desc->start_tag, 0, false);

	/* Generate the BL header */
	memset(bl_desc, 0, bl_desc_size);
	acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);

	/*
	 * Copy HS BL header where the HS descriptor expects it to be
	 */
	nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
			      bl_desc_size, 0);

	return hsbl_desc->start_tag << 8;
}
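
/*
 * The value returned above is the IMEM boot vector: start_tag is expressed
 * in 256-byte IMEM blocks, so shifting it left by 8 yields the byte address
 * at which the HS bootloader code was tagged by nvkm_falcon_load_imem().
 */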

static int
acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	struct nvkm_subdev *subdev = &sb->subdev;
	int i;

	/* Run the unload blob to unprotect the WPR region */
	if (acr->unload_blob && sb->wpr_set) {
		int ret;

		nvkm_debug(subdev, "running HS unload blob\n");
		ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
		if (ret < 0)
			return ret;
		/*
		 * Unload blob will return this error code - it is not an error
		 * and is the expected behavior on RM as well
		 */
		if (ret && ret != 0x1d) {
			nvkm_error(subdev, "HS unload failed, ret 0x%08x", ret);
			return -EINVAL;
		}
		nvkm_debug(subdev, "HS unload blob completed\n");
	}

	for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
		acr->falcon_state[i] = NON_SECURE;

	sb->wpr_set = false;

	return 0;
}

/**
 * Check if the WPR region has indeed been set by the ACR firmware, and
 * matches where it should be.
 */
static bool
acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = &sb->subdev;
	const struct nvkm_device *device = subdev->device;
	u64 wpr_lo, wpr_hi;
	u64 wpr_range_lo, wpr_range_hi;

	nvkm_wr32(device, 0x100cd4, 0x2);
	wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
	wpr_lo <<= 8;
	nvkm_wr32(device, 0x100cd4, 0x3);
	wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
	wpr_hi <<= 8;

	if (sb->wpr_size != 0) {
		wpr_range_lo = sb->wpr_addr;
		wpr_range_hi = wpr_range_lo + sb->wpr_size;
	} else {
		wpr_range_lo = acr->ls_blob->addr;
		wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
	}

	return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
		wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
}

static int
acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
	const struct nvkm_subdev *subdev = &sb->subdev;
	unsigned long managed_falcons = acr->base.managed_falcons;
	int falcon_id;
	int ret;

	if (sb->wpr_set)
		return 0;

	/* Make sure all blobs are ready */
	ret = acr_r352_load_blobs(acr, sb);
	if (ret)
		return ret;

	nvkm_debug(subdev, "running HS load blob\n");
	ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
	/* clear halt interrupt */
	nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
	sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		nvkm_error(subdev, "HS load failed, ret 0x%08x", ret);
		return -EINVAL;
	}
	nvkm_debug(subdev, "HS load blob completed\n");

	/* WPR must be set at this point */
	if (!sb->wpr_set) {
		nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
		return -EINVAL;
	}

	/* Run LS firmwares post_run hooks */
	for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		const struct acr_r352_ls_func *func =
						  acr->func->ls_func[falcon_id];

		if (func->post_run) {
			ret = func->post_run(&acr->base, sb);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/**
 * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
 *
 * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
 * disabled. This has the effect of making all managed falcons ready-to-run.
 */
static int
acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
		     unsigned long falcon_mask)
{
	int falcon;
	int ret;

	/*
	 * Perform secure boot each time we are called on FECS. Since only FECS
	 * and GPCCS are managed and started together, this ought to be safe.
	 */
	if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
		goto end;

	ret = acr_r352_shutdown(acr, sb);
	if (ret)
		return ret;

	ret = acr_r352_bootstrap(acr, sb);
	if (ret)
		return ret;

end:
	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
		acr->falcon_state[falcon] = RESET;
	}
	return 0;
}

/*
 * acr_r352_reset() - execute secure boot from the prepared state
 *
 * Load the HS bootloader and ask the falcon to run it. This will in turn
 * load the HS firmware and run it, so once the falcon stops all the managed
 * falcons should have their LS firmware loaded and be ready to run.
 */
static int
acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
	       unsigned long falcon_mask)
{
	struct acr_r352 *acr = acr_r352(_acr);
	struct nvkm_msgqueue *queue;
	int falcon;
	bool wpr_already_set = sb->wpr_set;
	int ret;

	/* Make sure secure boot is performed */
	ret = acr_r352_bootstrap(acr, sb);
	if (ret)
		return ret;

	/* No PMU interface? */
	if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
		/* Redo secure boot entirely if it was already done */
		if (wpr_already_set)
			return acr_r352_reset_nopmu(acr, sb, falcon_mask);
		/* Else return the result of the initial invocation */
		else
			return ret;
	}

	switch (_acr->boot_falcon) {
	case NVKM_SECBOOT_FALCON_PMU:
		queue = sb->subdev.device->pmu->queue;
		break;
	case NVKM_SECBOOT_FALCON_SEC2:
		queue = sb->subdev.device->sec2->queue;
		break;
	default:
		return -EINVAL;
	}

	/* Otherwise just ask the LS firmware to reset the falcon */
	for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
		nvkm_debug(&sb->subdev, "resetting %s falcon\n",
			   nvkm_secboot_falcon_name[falcon]);
	ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
	if (ret) {
		nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
		return ret;
	}
	nvkm_debug(&sb->subdev, "falcon reset done\n");

	return 0;
}

static int
acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend)
{
	struct acr_r352 *acr = acr_r352(_acr);

	return acr_r352_shutdown(acr, sb);
}

static void
acr_r352_dtor(struct nvkm_acr *_acr)
{
	struct acr_r352 *acr = acr_r352(_acr);

	nvkm_gpuobj_del(&acr->unload_blob);

	if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
		kfree(acr->hsbl_unload_blob);
	kfree(acr->hsbl_blob);
	nvkm_gpuobj_del(&acr->load_blob);
	nvkm_gpuobj_del(&acr->ls_blob);

	kfree(acr);
}

const struct acr_r352_ls_func
acr_r352_ls_fecs_func = {
	.load = acr_ls_ucode_load_fecs,
	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
};

const struct acr_r352_ls_func
acr_r352_ls_gpccs_func = {
	.load = acr_ls_ucode_load_gpccs,
	.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
	/* GPCCS will be loaded using PRI */
	.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};

/**
 * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
 * @dma_idx: DMA context to be used by BL while loading code/data
 * @code_dma_base: 256B-aligned physical FB address where code is located
 * @code_size_total: total size of the code part in the ucode
 * @code_size_to_load: size of the code part to load in PMU IMEM
 * @code_entry_point: entry point in the code
 * @data_dma_base: physical FB address where the data part of ucode is located
 * @data_size: total size of the data portion
 * @overlay_dma_base: physical FB address for resident code present in ucode
 * @argc: total number of args
 * @argv: offset where args are copied into PMU's DMEM
 *
 * Structure used by the PMU bootloader to load the rest of the code
 */
struct acr_r352_pmu_bl_desc {
	u32 dma_idx;
	u32 code_dma_base;
	u32 code_size_total;
	u32 code_size_to_load;
	u32 code_entry_point;
	u32 data_dma_base;
	u32 data_size;
	u32 overlay_dma_base;
	u32 argc;
	u32 argv;
	u16 code_dma_base1;
	u16 data_dma_base1;
	u16 overlay_dma_base1;
};

/**
 * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image
 */
static void
acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
			      const struct ls_ucode_img *img, u64 wpr_addr,
			      void *_desc)
{
	const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
	struct acr_r352_pmu_bl_desc *desc = _desc;
	u64 base;
	u64 addr_code;
	u64 addr_data;
	u32 addr_args;

	base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
	addr_code = (base + pdesc->app_resident_code_offset) >> 8;
	addr_data = (base + pdesc->app_resident_data_offset) >> 8;
	addr_args = pmu->falcon->data.limit;
	addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;

	desc->dma_idx = FALCON_DMAIDX_UCODE;
	desc->code_dma_base = lower_32_bits(addr_code);
	desc->code_dma_base1 = upper_32_bits(addr_code);
	desc->code_size_total = pdesc->app_size;
	desc->code_size_to_load = pdesc->app_resident_code_size;
	desc->code_entry_point = pdesc->app_imem_entry;
	desc->data_dma_base = lower_32_bits(addr_data);
	desc->data_dma_base1 = upper_32_bits(addr_data);
	desc->data_size = pdesc->app_resident_data_size;
	desc->overlay_dma_base = lower_32_bits(addr_code);
	desc->overlay_dma_base1 = upper_32_bits(addr_code);
	desc->argc = 1;
	desc->argv = addr_args;
}
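
/*
 * The single argument advertised above is the msgqueue command line, placed
 * at the very top of the PMU's DMEM (data.limit minus
 * NVKM_MSGQUEUE_CMDLINE_SIZE); argv is the DMEM offset to which the
 * host-provided args are copied before the LS firmware starts.
 */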

static const struct acr_r352_ls_func
acr_r352_ls_pmu_func = {
	.load = acr_ls_ucode_load_pmu,
	.generate_bl_desc = acr_r352_generate_pmu_bl_desc,
	.bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
	.post_run = acr_ls_pmu_post_run,
};

const struct acr_r352_func
acr_r352_func = {
	.fixup_hs_desc = acr_r352_fixup_hs_desc,
	.generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
	.hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
	.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
	.ls_fill_headers = acr_r352_ls_fill_headers,
	.ls_write_wpr = acr_r352_ls_write_wpr,
	.ls_func = {
		[NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
		[NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
	},
};

static const struct nvkm_acr_func
acr_r352_base_func = {
	.dtor = acr_r352_dtor,
	.fini = acr_r352_fini,
	.load = acr_r352_load,
	.reset = acr_r352_reset,
};

struct nvkm_acr *
acr_r352_new_(const struct acr_r352_func *func,
	      enum nvkm_secboot_falcon boot_falcon,
	      unsigned long managed_falcons)
{
	struct acr_r352 *acr;
	int i;

	/* Check that all requested falcons are supported */
	for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
		if (!func->ls_func[i])
			return ERR_PTR(-ENOTSUPP);
	}

	acr = kzalloc(sizeof(*acr), GFP_KERNEL);
	if (!acr)
		return ERR_PTR(-ENOMEM);

	acr->base.boot_falcon = boot_falcon;
	acr->base.managed_falcons = managed_falcons;
	acr->base.func = &acr_r352_base_func;
	acr->func = func;

	return &acr->base;
}

struct nvkm_acr *
acr_r352_new(unsigned long managed_falcons)
{
	return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU,
			     managed_falcons);
}