acr_r367.c

/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "acr_r367.h"
#include "acr_r361.h"

#include <core/gpuobj.h>

/*
 * r367 ACR: the new LS signature format requires a rewrite of the LS
 * firmware and blob creation functions. The hsflcn_desc layout has also
 * changed slightly.
 */

#define LSF_LSB_DEPMAP_SIZE 11

/**
 * struct acr_r367_lsf_lsb_header - LS firmware header
 *
 * See also struct acr_r352_lsf_lsb_header for documentation.
 */
struct acr_r367_lsf_lsb_header {
	/**
	 * LS falcon signatures
	 * @prd_keys: signature to use in production mode
	 * @dbg_keys: signature to use in debug mode
	 * @b_prd_present: whether the production key is present
	 * @b_dbg_present: whether the debug key is present
	 * @falcon_id: ID of the falcon the ucode applies to
	 */
	struct {
		u8 prd_keys[2][16];
		u8 dbg_keys[2][16];
		u32 b_prd_present;
		u32 b_dbg_present;
		u32 falcon_id;
		u32 supports_versioning;
		u32 version;
		u32 depmap_count;
		u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
		u8 kdf[16];
	} signature;
	u32 ucode_off;
	u32 ucode_size;
	u32 data_size;
	u32 bl_code_size;
	u32 bl_imem_off;
	u32 bl_data_off;
	u32 bl_data_size;
	u32 app_code_off;
	u32 app_code_size;
	u32 app_data_off;
	u32 app_data_size;
	u32 flags;
};

/**
 * struct acr_r367_lsf_wpr_header - LS blob WPR Header
 *
 * See also struct acr_r352_lsf_wpr_header for documentation.
 */
struct acr_r367_lsf_wpr_header {
	u32 falcon_id;
	u32 lsb_offset;
	u32 bootstrap_owner;
	u32 lazy_bootstrap;
	u32 bin_version;
	u32 status;
#define LSF_IMAGE_STATUS_NONE				0
#define LSF_IMAGE_STATUS_COPY				1
#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED		2
#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED		3
#define LSF_IMAGE_STATUS_VALIDATION_DONE		4
#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED		5
#define LSF_IMAGE_STATUS_BOOTSTRAP_READY		6
#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED	7
};

/**
 * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
 */
struct ls_ucode_img_r367 {
	struct ls_ucode_img base;
	struct acr_r367_lsf_wpr_header wpr_header;
	struct acr_r367_lsf_lsb_header lsb_header;
};

#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
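
/*
 * Load the LS firmware for @falcon_id and wrap it in an r367 image with
 * empty WPR/LSB headers. The loaded signature blob must match the r367
 * signature layout exactly, since it is copied verbatim into the LSB
 * header.
 */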
struct ls_ucode_img *
acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
			   const struct nvkm_secboot *sb,
			   enum nvkm_secboot_falcon falcon_id)
{
	const struct nvkm_subdev *subdev = acr->base.subdev;
	struct ls_ucode_img_r367 *img;
	int ret;

	img = kzalloc(sizeof(*img), GFP_KERNEL);
	if (!img)
		return ERR_PTR(-ENOMEM);

	img->base.falcon_id = falcon_id;

	ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
	if (ret) {
		kfree(img->base.ucode_data);
		kfree(img->base.sig);
		kfree(img);
		return ERR_PTR(ret);
	}

	/* Check that the signature size matches our expectations... */
	if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
		nvkm_error(subdev, "invalid signature size for %s falcon!\n",
			   nvkm_secboot_falcon_name[falcon_id]);
		/* Free everything the load callback allocated */
		kfree(img->base.ucode_data);
		kfree(img->base.sig);
		kfree(img);
		return ERR_PTR(-EINVAL);
	}

	/* Copy signature to the right place */
	memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);

	/* not needed? the signature should already have the right value */
	img->lsb_header.signature.falcon_id = falcon_id;

	return &img->base;
}

#define LSF_LSB_HEADER_ALIGN 256
#define LSF_BL_DATA_ALIGN 256
#define LSF_BL_DATA_SIZE_ALIGN 256
#define LSF_BL_CODE_SIZE_ALIGN 256
#define LSF_UCODE_DATA_ALIGN 4096
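
/*
 * Lay out a single LS image inside the WPR region: LSB header first, then
 * the ucode image, then the boot loader data. @offset is the next free
 * position in the WPR blob; the updated offset is returned so images can
 * be packed one after another.
 */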
static u32
acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
			     struct ls_ucode_img_r367 *img, u32 offset)
{
	struct ls_ucode_img *_img = &img->base;
	struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
	struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
	struct ls_ucode_img_desc *desc = &_img->ucode_desc;
	const struct acr_r352_ls_func *func =
					acr->func->ls_func[_img->falcon_id];

	/* Fill WPR header */
	whdr->falcon_id = _img->falcon_id;
	whdr->bootstrap_owner = acr->base.boot_falcon;
	whdr->bin_version = lhdr->signature.version;
	whdr->status = LSF_IMAGE_STATUS_COPY;

	/* Skip bootstrapping falcons started by someone other than the ACR */
	if (acr->lazy_bootstrap & BIT(_img->falcon_id))
		whdr->lazy_bootstrap = 1;

	/* Align, save off, and include the LSB header size */
	offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
	whdr->lsb_offset = offset;
	offset += sizeof(*lhdr);

	/*
	 * Align, save off, and include the original (static) ucode
	 * image size
	 */
	offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
	_img->ucode_off = lhdr->ucode_off = offset;
	offset += _img->ucode_size;

	/*
	 * For falcons that use a boot loader (BL), we append a loader
	 * desc structure on the end of the ucode image and consider
	 * this the boot loader data. The host will then copy the loader
	 * desc args to this space within the WPR region (before locking
	 * down) and the HS bin will then copy them to DMEM 0 for the
	 * loader.
	 */
	lhdr->bl_code_size = ALIGN(desc->bootloader_size,
				   LSF_BL_CODE_SIZE_ALIGN);
	lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
				 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
	lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
			  lhdr->bl_code_size - lhdr->ucode_size;

	/*
	 * Though the BL is located at the 0th offset of the image, its VA
	 * is different to make sure that it doesn't collide with the
	 * actual OS VA range
	 */
	lhdr->bl_imem_off = desc->bootloader_imem_offset;
	lhdr->app_code_off = desc->app_start_offset +
			     desc->app_resident_code_offset;
	lhdr->app_code_size = desc->app_resident_code_size;
	lhdr->app_data_off = desc->app_start_offset +
			     desc->app_resident_data_offset;
	lhdr->app_data_size = desc->app_resident_data_size;

	lhdr->flags = func->lhdr_flags;
	if (_img->falcon_id == acr->base.boot_falcon)
		lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;

	/* Align and save off BL descriptor size */
	lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);

	/* Align, save off, and include the additional BL data */
	offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
	lhdr->bl_data_off = offset;
	offset += lhdr->bl_data_size;

	return offset;
}
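
/*
 * Compute the layout of the whole LS blob: an array of WPR headers
 * (terminated by one extra, invalid entry) followed by each falcon's LSB
 * header, ucode image and BL data. Returns the total size the WPR blob
 * must have.
 */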
int
acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
{
	struct ls_ucode_img_r367 *img;
	struct list_head *l;
	u32 count = 0;
	u32 offset;

	/* Count the number of images to manage */
	list_for_each(l, imgs)
		count++;

	/*
	 * Start with an array of WPR headers at the base of the WPR.
	 * The expectation here is that the secure falcon will do a single DMA
	 * read of this array and cache it internally so it's ok to pack these.
	 * Also, we add 1 to the falcon count to indicate the end of the array.
	 */
	offset = sizeof(img->wpr_header) * (count + 1);

	/*
	 * Walk the managed falcons, accounting for the LSB structs
	 * as well as the ucode images.
	 */
	list_for_each_entry(img, imgs, base.node) {
		offset = acr_r367_ls_img_fill_headers(acr, img, offset);
	}

	return offset;
}
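
/*
 * Write the previously computed layout into the WPR blob: for each image,
 * copy its WPR and LSB headers, generate and write its BL descriptor, and
 * copy the ucode itself. The WPR header array is terminated with
 * NVKM_SECBOOT_FALCON_INVALID.
 */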
int
acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
		      struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
	struct ls_ucode_img *_img;
	u32 pos = 0;

	nvkm_kmap(wpr_blob);

	list_for_each_entry(_img, imgs, node) {
		struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
		const struct acr_r352_ls_func *ls_func =
					acr->func->ls_func[_img->falcon_id];
		u8 gdesc[ls_func->bl_desc_size];

		nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
				      sizeof(img->wpr_header));

		nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
				      &img->lsb_header,
				      sizeof(img->lsb_header));

		/* Generate and write BL descriptor */
		memset(gdesc, 0, ls_func->bl_desc_size);
		ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);

		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
				      gdesc, ls_func->bl_desc_size);

		/* Copy ucode */
		nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
				      _img->ucode_data, _img->ucode_size);

		pos += sizeof(img->wpr_header);
	}

	nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);

	nvkm_done(wpr_blob);

	return 0;
}
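
/*
 * Layout of the descriptor consumed by the r367 HS falcon firmware, hence
 * the reserved DMEM prefix and the explicit 8-byte alignment of
 * ucode_blob_base.
 */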
struct acr_r367_hsflcn_desc {
	u8 reserved_dmem[0x200];
	u32 signatures[4];
	u32 wpr_region_id;
	u32 wpr_offset;
	u32 mmu_memory_range;
#define FLCN_ACR_MAX_REGIONS 2
	struct {
		u32 no_regions;
		struct {
			u32 start_addr;
			u32 end_addr;
			u32 region_id;
			u32 read_mask;
			u32 write_mask;
			u32 client_mask;
			u32 shadow_mem_start_addr;
		} region_props[FLCN_ACR_MAX_REGIONS];
	} regions;
	u32 ucode_blob_size;
	u64 ucode_blob_base __aligned(8);
	struct {
		u32 vpr_enabled;
		u32 vpr_start;
		u32 vpr_end;
		u32 hdcp_policies;
	} vpr_desc;
};
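
/*
 * Patch the HS descriptor with run-time WPR information. When the WPR
 * region is not fixed by the hardware, the LS blob is described as region
 * 1; with a shadow blob, the first half of the gpuobj holds the shadow
 * copy and the second half the WPR region proper. Addresses are stored in
 * 256-byte units, hence the >> 8 shifts.
 */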
void
acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
		       void *_desc)
{
	struct acr_r367_hsflcn_desc *desc = _desc;
	struct nvkm_gpuobj *ls_blob = acr->ls_blob;

	/* WPR region information if WPR is not fixed */
	if (sb->wpr_size == 0) {
		u64 wpr_start = ls_blob->addr;
		u64 wpr_end = ls_blob->addr + ls_blob->size;

		if (acr->func->shadow_blob)
			wpr_start += ls_blob->size / 2;

		desc->wpr_region_id = 1;
		desc->regions.no_regions = 2;
		desc->regions.region_props[0].start_addr = wpr_start >> 8;
		desc->regions.region_props[0].end_addr = wpr_end >> 8;
		desc->regions.region_props[0].region_id = 1;
		desc->regions.region_props[0].read_mask = 0xf;
		desc->regions.region_props[0].write_mask = 0xc;
		desc->regions.region_props[0].client_mask = 0x2;
		if (acr->func->shadow_blob)
			desc->regions.region_props[0].shadow_mem_start_addr =
							ls_blob->addr >> 8;
		else
			desc->regions.region_props[0].shadow_mem_start_addr = 0;
	} else {
		desc->ucode_blob_base = ls_blob->addr;
		desc->ucode_blob_size = ls_blob->size;
	}
}

const struct acr_r352_func
acr_r367_func = {
	.fixup_hs_desc = acr_r367_fixup_hs_desc,
	.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
	.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
	.shadow_blob = true,
	.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
	.ls_fill_headers = acr_r367_ls_fill_headers,
	.ls_write_wpr = acr_r367_ls_write_wpr,
	.ls_func = {
		[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
		[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
		[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
		[NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
	},
};
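
/*
 * Instantiate an r367 ACR by reusing the generic r352 constructor with the
 * r367 function table.
 */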
struct nvkm_acr *
acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
	     unsigned long managed_falcons)
{
	return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
}