efi_64.c

/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in the elilo bootloader
 * based on an EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */
#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
/*
 * We allocate runtime services regions bottom-up, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;

struct efi_scratch efi_scratch;
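
/*
 * Mark the EFI runtime/boot services code regions executable (or not)
 * while firmware calls are made through the old 1:1 mapping. This only
 * matters when the CPU enforces NX via _PAGE_NX.
 */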
static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for_each_efi_memory_desc(md) {
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}
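
/*
 * Prepare for a physical-mode EFI call (e.g. SetVirtualAddressMap()).
 * With the new-style memmap we simply switch CR3 to the dedicated EFI
 * page table; with EFI_OLD_MEMMAP we save the affected kernel PGD
 * entries and alias the low virtual range to the direct mapping so
 * that virtual addresses equal physical addresses during the call.
 */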
pgd_t * __init efi_call_phys_prolog(void)
{
	unsigned long vaddress;
	pgd_t *save_pgd;

	int pgd;
	int n_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		save_pgd = (pgd_t *)read_cr3();
		write_cr3((unsigned long)efi_scratch.efi_pgt);
		goto out;
	}

	early_code_mapping_set_exec(1);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);

	for (pgd = 0; pgd < n_pgds; pgd++) {
		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
	}
out:
	__flush_tlb_all();

	return save_pgd;
}
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * Restore the page-table state that efi_call_phys_prolog() saved:
	 * either switch CR3 back, or put the saved kernel PGD entries back
	 * in place.
	 */
	int pgd_idx;
	int nr_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		write_cr3((unsigned long)save_pgd);
		__flush_tlb_all();
		return;
	}

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

	kfree(save_pgd);

	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}
static pgd_t *efi_pgd;

/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd;
	pud_t *pud;
	gfp_t gfp_mask;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
	if (!efi_pgd)
		return -ENOMEM;

	pgd = efi_pgd + pgd_index(EFI_VA_END);

	pud = pud_alloc_one(NULL, 0);
	if (!pud) {
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	pgd_populate(NULL, pgd, pud);

	return 0;
}
/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	pud_t *pud_k, *pud_efi;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	/*
	 * We can share all PGD entries apart from the one entry that
	 * covers the EFI runtime mapping space.
	 *
	 * Make sure the EFI runtime region mappings are guaranteed to
	 * only span a single PGD entry and that the entry also maps
	 * other important kernel regions.
	 */
	BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
	BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
			(EFI_VA_END & PGDIR_MASK));

	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pud_efi = pud_offset(pgd_efi, 0);

	pgd_k = pgd_offset_k(EFI_VA_END);
	pud_k = pud_offset(pgd_k, 0);

	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	pud_efi = pud_offset(pgd_efi, EFI_VA_START);
	pud_k = pud_offset(pgd_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}
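
/*
 * Populate the EFI page table with everything a runtime call needs: an
 * ident-mapping of the new memory map and, for mixed mode, 1:1 mappings
 * of usable RAM, the kernel text and a firmware stack below 4GB.
 * Returns 0 on success, nonzero on failure.
 */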
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	unsigned long pfn, text;
	efi_memory_desc_t *md;
	struct page *page;
	unsigned npages;
	pgd_t *pgd;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
	pgd = efi_pgd;

	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX | _PAGE_RW)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	efi_scratch.use_pgd = true;

	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return 0;

	/*
	 * Map all of RAM so that we can access arguments in the 1:1
	 * mapping when making EFI runtime calls.
	 */
	for_each_efi_memory_desc(md) {
		if (md->type != EFI_CONVENTIONAL_MEMORY &&
		    md->type != EFI_LOADER_DATA &&
		    md->type != EFI_LOADER_CODE)
			continue;

		pfn = md->phys_addr >> PAGE_SHIFT;
		npages = md->num_pages;

		if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, npages, _PAGE_RW)) {
			pr_err("Failed to map 1:1 memory\n");
			return 1;
		}
	}

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page)
		panic("Unable to allocate EFI runtime stack < 4GB\n");

	efi_scratch.phys_stack = virt_to_phys(page_address(page));
	efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */

	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);
	pfn = text >> PAGE_SHIFT;

	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, _PAGE_RW)) {
		pr_err("Failed to map kernel text 1:1\n");
		return 1;
	}

	return 0;
}
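
/*
 * Map one memory descriptor into the EFI page table at the requested
 * virtual address, using uncached (_PAGE_PCD) attributes when the
 * region is not write-back capable.
 */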
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_pgd;

	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;

	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}
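
/*
 * Pick a virtual address for a runtime region and map it. VAs are handed
 * out top-down starting from EFI_VA_START; when the physical address is
 * not 2M-aligned, the VA keeps the same offset within a 2M page
 * (presumably so large-page mappings remain possible).
 */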
void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
		md->virt_addr = md->phys_addr;
		return;
	}

	efi_va -= size;

	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}
/*
 * The kexec kernel uses efi_map_region_fixed() to map EFI runtime memory
 * ranges. md->virt_addr is the original virtual address that was already
 * mapped in the first kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->virt_addr);
}
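
/*
 * Map an EFI region that is not covered by the regular runtime mappings:
 * MMIO regions go through ioremap(), anything else is added to the kernel
 * direct mapping (and marked uncached if the firmware did not flag it as
 * write-back).
 */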
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}
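
/*
 * Record where the SETUP_EFI setup_data payload lives (passed in via the
 * boot params, e.g. by kexec); efi_setup points just past the header.
 */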
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}
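
/*
 * Tighten permissions on the runtime mappings once the memory attributes
 * are known: data regions lose execute permission and read-only regions
 * lose write permission, in both the 1:1 and the high VA mappings. Only
 * done when EFI_NX_PE_DATA is set (derived from the EFI properties table).
 */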
void __init efi_runtime_update_mappings(void)
{
	unsigned long pfn;
	pgd_t *pgd = efi_pgd;
	efi_memory_desc_t *md;

	if (efi_enabled(EFI_OLD_MEMMAP)) {
		if (__supported_pte_mask & _PAGE_NX)
			runtime_code_page_mkexec();
		return;
	}

	if (!efi_enabled(EFI_NX_PE_DATA))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long pf = 0;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;

		if ((md->attribute & EFI_MEMORY_XP) ||
		    (md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;

		if (!(md->attribute & EFI_MEMORY_RO) &&
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;

		/* Update the 1:1 mapping */
		pfn = md->phys_addr >> PAGE_SHIFT;
		if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf))
			pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
				md->phys_addr, md->virt_addr);

		if (kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf))
			pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
				md->phys_addr, md->virt_addr);
	}
}
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	ptdump_walk_pgd_level(NULL, efi_pgd);
#endif
}
#ifdef CONFIG_EFI_MIXED
extern efi_status_t efi64_thunk(u32, ...);

#define runtime_service32(func)						 \
({									 \
	u32 table = (u32)(unsigned long)efi.systab;			 \
	u32 *rt, *___f;							 \
									 \
	rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \
	___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func));\
	*___f;								 \
})
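
/*
 * Illustrative usage: fetch the 32-bit function pointer for GetTime() out
 * of the 32-bit runtime services table and call it via the thunk:
 *
 *	u32 func = runtime_service32(get_time);
 *	status = efi64_thunk(func, phys_tm, phys_tc);
 *
 * efi_thunk() below wraps exactly this pattern.
 */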
/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables. This function must be called before runtime_service32().
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(f, ...)						\
({									\
	efi_status_t __s;						\
	unsigned long __flags;						\
	u32 __func;							\
									\
	local_irq_save(__flags);					\
	arch_efi_call_virt_setup();					\
									\
	__func = runtime_service32(f);					\
	__s = efi64_thunk(__func, __VA_ARGS__);				\
									\
	arch_efi_call_virt_teardown();					\
	local_irq_restore(__flags);					\
									\
	__s;								\
})
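
/*
 * Note: every pointer argument passed through efi_thunk() must be
 * addressable with a 32-bit (physical) address, hence the virt_to_phys()
 * conversions in the wrappers below.
 */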
efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	u32 func;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_scratch.prev_cr3 = read_cr3();
	write_cr3((unsigned long)efi_scratch.efi_pgt);
	__flush_tlb_all();

	func = (u32)(unsigned long)phys_set_virtual_address_map;
	status = efi64_thunk(func, memory_map_size, descriptor_size,
			     descriptor_version, virtual_map);

	write_cr3(efi_scratch.prev_cr3);
	__flush_tlb_all();
	local_irq_restore(flags);

	return status;
}
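
/*
 * The time services touch the CMOS RTC, so they are serialized against
 * the rest of the kernel's RTC accesses with rtc_lock.
 */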
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	efi_status_t status;
	u32 phys_tm, phys_tc;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys(tm);
	phys_tc = virt_to_phys(tc);

	status = efi_thunk(get_time, phys_tm, phys_tc);

	spin_unlock(&rtc_lock);

	return status;
}
static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys(tm);

	status = efi_thunk(set_time, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}
static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_enabled, phys_pending, phys_tm;

	spin_lock(&rtc_lock);

	phys_enabled = virt_to_phys(enabled);
	phys_pending = virt_to_phys(pending);
	phys_tm = virt_to_phys(tm);

	status = efi_thunk(get_wakeup_time, phys_enabled,
			   phys_pending, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}
static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys(tm);

	status = efi_thunk(set_wakeup_time, enabled, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}
static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;

	phys_data_size = virt_to_phys(data_size);
	phys_vendor = virt_to_phys(vendor);
	phys_name = virt_to_phys(name);
	phys_attr = virt_to_phys(attr);
	phys_data = virt_to_phys(data);

	status = efi_thunk(get_variable, phys_name, phys_vendor,
			   phys_attr, phys_data_size, phys_data);

	return status;
}
static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;

	phys_name = virt_to_phys(name);
	phys_vendor = virt_to_phys(vendor);
	phys_data = virt_to_phys(data);

	/* If data_size is > sizeof(u32) we've got problems */
	status = efi_thunk(set_variable, phys_name, phys_vendor,
			   attr, data_size, phys_data);

	return status;
}
static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;

	phys_name_size = virt_to_phys(name_size);
	phys_vendor = virt_to_phys(vendor);
	phys_name = virt_to_phys(name);

	status = efi_thunk(get_next_variable, phys_name_size,
			   phys_name, phys_vendor);

	return status;
}
static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	efi_status_t status;
	u32 phys_count;

	phys_count = virt_to_phys(count);
	status = efi_thunk(get_next_high_mono_count, phys_count);

	return status;
}
static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;

	phys_data = virt_to_phys(data);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);
}
static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	phys_storage = virt_to_phys(storage_space);
	phys_remaining = virt_to_phys(remaining_space);
	phys_max = virt_to_phys(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	return status;
}
static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
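
/*
 * Install the thunked wrappers as the kernel's EFI runtime entry points;
 * used when a 64-bit kernel runs on 32-bit firmware (CONFIG_EFI_MIXED).
 */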
void efi_thunk_runtime_setup(void)
{
	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
#endif /* CONFIG_EFI_MIXED */