setup.c
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "vdso.h"
#include "mmu.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820entry xen_e820_map[E820MAX] __initdata;
static u32 xen_e820_map_entries __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE      (P2M_PER_PAGE - 3)
static struct {
        unsigned long   next_area_mfn;
        unsigned long   target_pfn;
        unsigned long   size;
        unsigned long   mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
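
/*
 * Illustration (added, not from the original source): the remap data forms
 * a singly linked list threaded through the first page of each area to be
 * remapped, anchored at xen_remap_mfn. Assuming 4 KiB pages and 8-byte
 * longs, P2M_PER_PAGE is 512, so each list node carries up to
 * REMAP_SIZE = 509 mfns (the node's own mfn is also mfns[0]):
 *
 *   xen_remap_mfn -> [ next_area_mfn | target_pfn | size | mfns[0..508] ]
 *                          |
 *                          v
 *                    [ next_area_mfn | ... ] -> ... -> INVALID_P2M_ENTRY
 */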

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO         (10)
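
/*
 * Worked example (added for illustration, assuming a 64-byte struct page
 * and 4 KiB pages): page structures cost 64/4096 = 1/64 of the memory they
 * describe. With 1 GB of base memory, the 10x cap allows up to 10 GB of
 * extra memory, whose struct pages consume about 160 MB of the base
 * allocation, which is noticeable but leaves the system usable.
 */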

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

static void __init xen_parse_512gb(void)
{
        bool val = false;
        char *arg;

        arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
        if (!arg)
                return;

        arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
        if (!arg)
                val = true;
        else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
                return;

        xen_512gb_limit = val;
}
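
/*
 * Usage note (added): a bare "xen_512gb_limit" on the command line enables
 * the limit, while "xen_512gb_limit=<bool>" sets it explicitly, e.g.
 * "xen_512gb_limit=0" disables it. An unparseable value leaves the
 * CONFIG_XEN_512GB default in place.
 */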

static void __init xen_add_extra_mem(unsigned long start_pfn,
                                     unsigned long n_pfns)
{
        int i;

        /*
         * No need to check for zero size; that should happen rarely and
         * will only write a new entry which is regarded as unused due to
         * its zero size.
         */
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                /* Add new region. */
                if (xen_extra_mem[i].n_pfns == 0) {
                        xen_extra_mem[i].start_pfn = start_pfn;
                        xen_extra_mem[i].n_pfns = n_pfns;
                        break;
                }
                /* Append to existing region. */
                if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
                    start_pfn) {
                        xen_extra_mem[i].n_pfns += n_pfns;
                        break;
                }
        }
        if (i == XEN_EXTRA_MEM_MAX_REGIONS)
                printk(KERN_WARNING "Warning: not enough extra memory regions\n");

        memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

static void __init xen_del_extra_mem(unsigned long start_pfn,
                                     unsigned long n_pfns)
{
        int i;
        unsigned long start_r, size_r;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                start_r = xen_extra_mem[i].start_pfn;
                size_r = xen_extra_mem[i].n_pfns;

                /* Start of region. */
                if (start_r == start_pfn) {
                        BUG_ON(n_pfns > size_r);
                        xen_extra_mem[i].start_pfn += n_pfns;
                        xen_extra_mem[i].n_pfns -= n_pfns;
                        break;
                }
                /* End of region. */
                if (start_r + size_r == start_pfn + n_pfns) {
                        BUG_ON(n_pfns > size_r);
                        xen_extra_mem[i].n_pfns -= n_pfns;
                        break;
                }
                /* Middle of region. */
                if (start_pfn > start_r && start_pfn < start_r + size_r) {
                        BUG_ON(start_pfn + n_pfns > start_r + size_r);
                        xen_extra_mem[i].n_pfns = start_pfn - start_r;
                        /* Calling memblock_reserve() again is okay. */
                        xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
                                          (start_pfn + n_pfns));
                        break;
                }
        }
        memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}
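
/*
 * Example (added): deleting pfns [120, 130) from an extra-mem region
 * covering [100, 200) hits the "middle" case above: the region is trimmed
 * to [100, 120) and a new region [130, 200) is added, so one entry in
 * xen_extra_mem is split into two.
 */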

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
        int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                if (pfn >= xen_extra_mem[i].start_pfn &&
                    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
                        return INVALID_P2M_ENTRY;
        }

        return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
        unsigned long pfn, pfn_s, pfn_e;
        int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                if (!xen_extra_mem[i].n_pfns)
                        continue;
                pfn_s = xen_extra_mem[i].start_pfn;
                pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
                for (pfn = pfn_s; pfn < pfn_e; pfn++)
                        set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
        }
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
        const struct e820entry *entry = xen_e820_map;
        unsigned int i;
        unsigned long done = 0;

        for (i = 0; i < xen_e820_map_entries; i++, entry++) {
                unsigned long s_pfn;
                unsigned long e_pfn;

                if (entry->type != E820_RAM)
                        continue;

                e_pfn = PFN_DOWN(entry->addr + entry->size);

                /* We only care about E820 after this */
                if (e_pfn < *min_pfn)
                        continue;

                s_pfn = PFN_UP(entry->addr);

                /* If min_pfn falls within the E820 entry, we want to start
                 * at the min_pfn PFN.
                 */
                if (s_pfn <= *min_pfn) {
                        done = e_pfn - *min_pfn;
                } else {
                        done = e_pfn - s_pfn;
                        *min_pfn = s_pfn;
                }
                break;
        }

        return done;
}
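
/*
 * Example (added): given RAM entries covering pfns [0x1, 0x9f) and
 * [0x100, 0x20000), a call with *min_pfn = 0x200 skips the first entry
 * (it ends below 0x200), finds that 0x200 lies inside the second one,
 * leaves *min_pfn at 0x200 and returns 0x20000 - 0x200 pfns.
 */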

static int __init xen_free_mfn(unsigned long mfn)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid        = DOMID_SELF
        };

        set_xen_guest_handle(reservation.extent_start, &mfn);
        reservation.nr_extents = 1;

        return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
                        unsigned long end_pfn, unsigned long nr_pages)
{
        unsigned long pfn, end;
        int ret;

        WARN_ON(start_pfn > end_pfn);

        /* Release pages first. */
        end = min(end_pfn, nr_pages);
        for (pfn = start_pfn; pfn < end; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);

                /* Make sure pfn exists to start with */
                if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
                        continue;

                ret = xen_free_mfn(mfn);
                WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

                if (ret == 1) {
                        xen_released_pages++;
                        if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
                                break;
                } else
                        break;
        }

        set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
        struct mmu_update update = {
                .ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
                .val = pfn
        };

        /* Update p2m */
        if (!set_phys_to_machine(pfn, mfn)) {
                WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
                     pfn, mfn);
                BUG();
        }

        /* Update m2p */
        if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
                WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
                     mfn, pfn);
                BUG();
        }

        /* Update kernel mapping, but not for highmem. */
        if (pfn >= PFN_UP(__pa(high_memory - 1)))
                return;

        if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
                                         mfn_pte(mfn, PAGE_KERNEL), 0)) {
                WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
                     mfn, pfn);
                BUG();
        }
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
        unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
        unsigned long buf = (unsigned long)&xen_remap_buf;
        unsigned long mfn_save, mfn;
        unsigned long ident_pfn_iter, remap_pfn_iter;
        unsigned long ident_end_pfn = start_pfn + size;
        unsigned long left = size;
        unsigned int i, chunk;

        WARN_ON(size == 0);

        BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

        mfn_save = virt_to_mfn(buf);

        for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
             ident_pfn_iter < ident_end_pfn;
             ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
                chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

                /* Map first pfn to xen_remap_buf */
                mfn = pfn_to_mfn(ident_pfn_iter);
                set_pte_mfn(buf, mfn, PAGE_KERNEL);

                /* Save mapping information in page */
                xen_remap_buf.next_area_mfn = xen_remap_mfn;
                xen_remap_buf.target_pfn = remap_pfn_iter;
                xen_remap_buf.size = chunk;
                for (i = 0; i < chunk; i++)
                        xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

                /* Put remap buf into list. */
                xen_remap_mfn = mfn;

                /* Set identity map */
                set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

                left -= chunk;
        }

        /* Restore old xen_remap_buf mapping */
        set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}
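
/*
 * Example (added, assuming REMAP_SIZE is 509 as computed above): a call
 * with size = 1200 walks the range in chunks of 509, 509 and 182 pfns.
 * Each iteration stores its chunk's remap data in the chunk's own first
 * page and links that page onto the list headed by xen_remap_mfn.
 */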

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
        unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
        unsigned long remap_pfn)
{
        unsigned long pfn;
        unsigned long i = 0;
        unsigned long n = end_pfn - start_pfn;

        while (i < n) {
                unsigned long cur_pfn = start_pfn + i;
                unsigned long left = n - i;
                unsigned long size = left;
                unsigned long remap_range_size;

                /* Do not remap pages beyond the current allocation */
                if (cur_pfn >= nr_pages) {
                        /* Identity map remaining pages */
                        set_phys_range_identity(cur_pfn, cur_pfn + size);
                        break;
                }
                if (cur_pfn + size > nr_pages)
                        size = nr_pages - cur_pfn;

                remap_range_size = xen_find_pfn_range(&remap_pfn);
                if (!remap_range_size) {
                        pr_warning("Unable to find available pfn range, not remapping identity pages\n");
                        xen_set_identity_and_release_chunk(cur_pfn,
                                                cur_pfn + left, nr_pages);
                        break;
                }
                /* Adjust size to fit in current e820 RAM region */
                if (size > remap_range_size)
                        size = remap_range_size;

                xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

                /* Update variables to reflect new mappings. */
                i += size;
                remap_pfn += size;
        }

        /*
         * If the PFNs are currently mapped, the VA mapping also needs
         * to be updated to be 1:1.
         */
        for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
                (void)HYPERVISOR_update_va_mapping(
                        (unsigned long)__va(pfn << PAGE_SHIFT),
                        mfn_pte(pfn, PAGE_KERNEL_IO), 0);

        return remap_pfn;
}
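
/*
 * Example (added): remapping pfns [0xa0, 0x100) with nr_pages = 0xc0 splits
 * the work: pfns [0xa0, 0xc0) are backed by the initial allocation and get
 * remapped to the range found by xen_find_pfn_range(), while pfns
 * [0xc0, 0x100) lie beyond the allocation and are only identity mapped.
 */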

static void __init xen_set_identity_and_remap(unsigned long nr_pages)
{
        phys_addr_t start = 0;
        unsigned long last_pfn = nr_pages;
        const struct e820entry *entry = xen_e820_map;
        int i;

        /*
         * Combine non-RAM regions and gaps until a RAM region (or the
         * end of the map) is reached, then set the 1:1 map and
         * remap the memory in those non-RAM regions.
         *
         * The combined non-RAM regions are rounded to a whole number
         * of pages so any partial pages are accessible via the 1:1
         * mapping. This is needed for some BIOSes that put (for
         * example) the DMI tables in a reserved region that begins on
         * a non-page boundary.
         */
        for (i = 0; i < xen_e820_map_entries; i++, entry++) {
                phys_addr_t end = entry->addr + entry->size;
                if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
                        unsigned long start_pfn = PFN_DOWN(start);
                        unsigned long end_pfn = PFN_UP(end);

                        if (entry->type == E820_RAM)
                                end_pfn = PFN_UP(entry->addr);

                        if (start_pfn < end_pfn)
                                last_pfn = xen_set_identity_and_remap_chunk(
                                                start_pfn, end_pfn, nr_pages,
                                                last_pfn);
                        start = end;
                }
        }

        pr_info("Released %ld page(s)\n", xen_released_pages);
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained
 * in the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows remapping the different chunks in
 * arbitrary order while the resulting mapping will be independent of the
 * order.
 */
void __init xen_remap_memory(void)
{
        unsigned long buf = (unsigned long)&xen_remap_buf;
        unsigned long mfn_save, mfn, pfn;
        unsigned long remapped = 0;
        unsigned int i;
        unsigned long pfn_s = ~0UL;
        unsigned long len = 0;

        mfn_save = virt_to_mfn(buf);

        while (xen_remap_mfn != INVALID_P2M_ENTRY) {
                /* Map the remap information */
                set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

                BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

                pfn = xen_remap_buf.target_pfn;
                for (i = 0; i < xen_remap_buf.size; i++) {
                        mfn = xen_remap_buf.mfns[i];
                        xen_update_mem_tables(pfn, mfn);
                        remapped++;
                        pfn++;
                }
                if (pfn_s == ~0UL || pfn == pfn_s) {
                        pfn_s = xen_remap_buf.target_pfn;
                        len += xen_remap_buf.size;
                } else if (pfn_s + len == xen_remap_buf.target_pfn) {
                        len += xen_remap_buf.size;
                } else {
                        xen_del_extra_mem(pfn_s, len);
                        pfn_s = xen_remap_buf.target_pfn;
                        len = xen_remap_buf.size;
                }

                mfn = xen_remap_mfn;
                xen_remap_mfn = xen_remap_buf.next_area_mfn;
        }

        if (pfn_s != ~0UL && len)
                xen_del_extra_mem(pfn_s, len);

        set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

        pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_pages_limit(void)
{
        unsigned long limit;

#ifdef CONFIG_X86_32
        limit = GB(64) / PAGE_SIZE;
#else
        limit = MAXMEM / PAGE_SIZE;
        if (!xen_initial_domain() && xen_512gb_limit)
                limit = GB(512) / PAGE_SIZE;
#endif
        return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
        unsigned long max_pages, limit;
        domid_t domid = DOMID_SELF;
        int ret;

        limit = xen_get_pages_limit();
        max_pages = limit;

        /*
         * For the initial domain we use the maximum reservation as
         * the maximum page.
         *
         * For guest domains the current maximum reservation reflects
         * the current maximum rather than the static maximum. In this
         * case the e820 map provided to us will cover the static
         * maximum region.
         */
        if (xen_initial_domain()) {
                ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
                if (ret > 0)
                        max_pages = ret;
        }

        return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
                                                 phys_addr_t size, int type)
{
        phys_addr_t end = start + size;

        /* Align RAM regions to page boundaries. */
        if (type == E820_RAM) {
                start = PAGE_ALIGN(start);
                end &= ~((phys_addr_t)PAGE_SIZE - 1);
        }

        e820_add_region(start, end - start, type);
}
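
/*
 * Example (added): a RAM chunk covering [0x500, 0x2800) is shrunk inward
 * to the page-aligned range [0x1000, 0x2000) before being added, while
 * non-RAM regions are passed through unaligned so partial pages stay
 * covered.
 */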

static void __init xen_ignore_unusable(void)
{
        struct e820entry *entry = xen_e820_map;
        unsigned int i;

        for (i = 0; i < xen_e820_map_entries; i++, entry++) {
                if (entry->type == E820_UNUSABLE)
                        entry->type = E820_RAM;
        }
}

static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
        unsigned long extra = 0;
        unsigned long start_pfn, end_pfn;
        const struct e820entry *entry = xen_e820_map;
        int i;

        end_pfn = 0;
        for (i = 0; i < xen_e820_map_entries; i++, entry++) {
                start_pfn = PFN_DOWN(entry->addr);
                /* Handle adjacent regions on non-page boundaries. */
                end_pfn = min(end_pfn, start_pfn);

                if (start_pfn >= max_pfn)
                        return extra + max_pfn - end_pfn;

                /* Add any holes in map to result. */
                extra += start_pfn - end_pfn;

                end_pfn = PFN_UP(entry->addr + entry->size);
                end_pfn = min(end_pfn, max_pfn);

                if (entry->type != E820_RAM)
                        extra += end_pfn - start_pfn;
        }

        return extra;
}
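
/*
 * Worked example (added): with a map of RAM pfns [0, 0x9f), a reserved
 * region [0x9f, 0x100) and RAM [0x100, 0x1000), and max_pfn = 0x1000,
 * there are no holes and the reserved entry contributes
 * 0x100 - 0x9f = 0x61 remappable pfns, so the function returns 0x61.
 */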

bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
        struct e820entry *entry;
        unsigned mapcnt;
        phys_addr_t end;

        if (!size)
                return false;

        end = start + size;
        entry = xen_e820_map;

        for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
                if (entry->type == E820_RAM && entry->addr <= start &&
                    (entry->addr + entry->size) >= end)
                        return false;

                entry++;
        }

        return true;
}

/*
 * Find a free area in physical memory not yet reserved and compliant with
 * the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list
 * which conflict with the E820 map about to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area, which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
        unsigned mapcnt;
        phys_addr_t addr, start;
        struct e820entry *entry = xen_e820_map;

        for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
                if (entry->type != E820_RAM || entry->size < size)
                        continue;
                start = entry->addr;
                for (addr = start; addr < start + size; addr += PAGE_SIZE) {
                        if (!memblock_is_reserved(addr))
                                continue;
                        start = addr + PAGE_SIZE;
                        if (start + size > entry->addr + entry->size)
                                break;
                }
                if (addr >= start + size) {
                        memblock_reserve(start, size);
                        return start;
                }
        }

        return 0;
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
                                   phys_addr_t n)
{
        phys_addr_t dest_off, src_off, dest_len, src_len, len;
        void *from, *to;

        while (n) {
                dest_off = dest & ~PAGE_MASK;
                src_off = src & ~PAGE_MASK;
                dest_len = n;
                if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
                        dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
                src_len = n;
                if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
                        src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
                len = min(dest_len, src_len);
                to = early_memremap(dest - dest_off, dest_len + dest_off);
                from = early_memremap(src - src_off, src_len + src_off);
                memcpy(to, from, len);
                early_memunmap(to, dest_len + dest_off);
                early_memunmap(from, src_len + src_off);
                n -= len;
                dest += len;
                src += len;
        }
}
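
/*
 * Note (added): early_memremap() can only map a limited early-fixmap
 * window, so the copy proceeds in iterations bounded by
 * NR_FIX_BTMAPS << PAGE_SHIFT bytes minus the page offsets of dest and
 * src; len is what both windows can cover in a single pass.
 */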

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
        phys_addr_t start, size;

        if (xen_start_info->mfn_list >= __START_KERNEL_map) {
                start = __pa(xen_start_info->mfn_list);
                size = PFN_ALIGN(xen_start_info->nr_pages *
                                 sizeof(unsigned long));
        } else {
                start = PFN_PHYS(xen_start_info->first_p2m_pfn);
                size = PFN_PHYS(xen_start_info->nr_p2m_frames);
        }

        if (!xen_is_e820_reserved(start, size)) {
                memblock_reserve(start, size);
                return;
        }

#ifdef CONFIG_X86_32
        /*
         * Relocating the p2m on 32 bit system to an arbitrary virtual address
         * is not supported, so just give up.
         */
        xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
        BUG();
#else
        xen_relocate_p2m();
#endif
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
        unsigned long max_pfn, pfn_s, n_pfns;
        phys_addr_t mem_end, addr, size, chunk_size;
        u32 type;
        int rc;
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long extra_pages = 0;
        int i;
        int op;

        xen_parse_512gb();
        max_pfn = xen_get_pages_limit();
        max_pfn = min(max_pfn, xen_start_info->nr_pages);
        mem_end = PFN_PHYS(max_pfn);

        memmap.nr_entries = E820MAX;
        set_xen_guest_handle(memmap.buffer, xen_e820_map);

        op = xen_initial_domain() ?
                XENMEM_machine_memory_map :
                XENMEM_memory_map;
        rc = HYPERVISOR_memory_op(op, &memmap);
        if (rc == -ENOSYS) {
                BUG_ON(xen_initial_domain());
                memmap.nr_entries = 1;
                xen_e820_map[0].addr = 0ULL;
                xen_e820_map[0].size = mem_end;
                /* 8MB slack (to balance backend allocations). */
                xen_e820_map[0].size += 8ULL << 20;
                xen_e820_map[0].type = E820_RAM;
                rc = 0;
        }
        BUG_ON(rc);
        BUG_ON(memmap.nr_entries == 0);
        xen_e820_map_entries = memmap.nr_entries;

        /*
         * Xen won't allow a 1:1 mapping to be created to UNUSABLE
         * regions, so if we're using the machine memory map leave the
         * region as RAM as it is in the pseudo-physical map.
         *
         * UNUSABLE regions in domUs are not handled and will need
         * a patch in the future.
         */
        if (xen_initial_domain())
                xen_ignore_unusable();

        /* Make sure the Xen-supplied memory map is well-ordered. */
        sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
                          &xen_e820_map_entries);

        max_pages = xen_get_max_pages();

        /* How many extra pages do we need due to remapping? */
        max_pages += xen_count_remap_pages(max_pfn);

        if (max_pages > max_pfn)
                extra_pages += max_pages - max_pfn;

        /*
         * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
         * factor of the base size. On non-highmem systems, the base
         * size is the full initial memory allocation; on highmem it
         * is limited to the max size of lowmem, so that it doesn't
         * get completely filled.
         *
         * Make sure we have no memory above max_pages, as this area
         * isn't handled by the p2m management.
         *
         * In principle there could be a problem in lowmem systems if
         * the initial memory is also very large with respect to
         * lowmem, but we won't try to deal with that here.
         */
        extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                           extra_pages, max_pages - max_pfn);
        i = 0;
        addr = xen_e820_map[0].addr;
        size = xen_e820_map[0].size;
        while (i < xen_e820_map_entries) {
                chunk_size = size;
                type = xen_e820_map[i].type;

                if (type == E820_RAM) {
                        if (addr < mem_end) {
                                chunk_size = min(size, mem_end - addr);
                        } else if (extra_pages) {
                                chunk_size = min(size, PFN_PHYS(extra_pages));
                                pfn_s = PFN_UP(addr);
                                n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
                                extra_pages -= n_pfns;
                                xen_add_extra_mem(pfn_s, n_pfns);
                                xen_max_p2m_pfn = pfn_s + n_pfns;
                        } else
                                type = E820_UNUSABLE;
                }

                xen_align_and_add_e820_region(addr, chunk_size, type);

                addr += chunk_size;
                size -= chunk_size;
                if (size == 0) {
                        i++;
                        if (i < xen_e820_map_entries) {
                                addr = xen_e820_map[i].addr;
                                size = xen_e820_map[i].size;
                        }
                }
        }

        /*
         * Set the rest as identity mapped, in case PCI BARs are
         * located here.
         */
        set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

        /*
         * In domU, the ISA region is normal, usable memory, but we
         * reserve ISA memory anyway because too many things poke
         * about in there.
         */
        e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
                        E820_RESERVED);

        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

        /*
         * Check whether the kernel itself conflicts with the target E820 map.
         * Failing now is better than running into weird problems later due
         * to relocating (and even reusing) pages with kernel text or data.
         */
        if (xen_is_e820_reserved(__pa_symbol(_text),
                                 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
                xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
                BUG();
        }

        /*
         * Check for a conflict of the hypervisor supplied page tables with
         * the target E820 map.
         */
        xen_pt_check_e820();

        xen_reserve_xen_mfnlist();

        /* Check for a conflict of the initrd with the target E820 map. */
        if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
                                 boot_params.hdr.ramdisk_size)) {
                phys_addr_t new_area, start, size;

                new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
                if (!new_area) {
                        xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
                        BUG();
                }

                start = boot_params.hdr.ramdisk_image;
                size = boot_params.hdr.ramdisk_size;
                xen_phys_memcpy(new_area, start, size);
                pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
                        start, start + size, new_area, new_area + size);
                memblock_free(start, size);
                boot_params.hdr.ramdisk_image = new_area;
                boot_params.ext_ramdisk_image = new_area >> 32;
        }

        /*
         * Set identity map on non-RAM pages and prepare remapping the
         * underlying RAM.
         */
        xen_set_identity_and_remap(max_pfn);

        return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
        struct xen_memory_map memmap;
        int i;
        int rc;

        memmap.nr_entries = E820MAX;
        set_xen_guest_handle(memmap.buffer, xen_e820_map);

        rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
        if (rc < 0)
                panic("No memory map (%d)\n", rc);

        xen_e820_map_entries = memmap.nr_entries;

        sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
                          &xen_e820_map_entries);

        for (i = 0; i < xen_e820_map_entries; i++)
                e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
                                xen_e820_map[i].type);

        /* Remove p2m info, it is not needed. */
        xen_start_info->mfn_list = 0;
        xen_start_info->first_p2m_pfn = 0;
        xen_start_info->nr_p2m_frames = 0;

        return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
        /*
         * This could be called before selected_vdso32 is initialized, so
         * just fiddle with both possible images. vdso_image_32_syscall
         * can't be selected, since it only exists on 64-bit systems.
         */
        u32 *mask;
        mask = vdso_image_32_int80.data +
                vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
        mask = vdso_image_32_sysenter.data +
                vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

static int register_callback(unsigned type, const void *func)
{
        struct callback_register callback = {
                .type = type,
                .address = XEN_CALLBACK(__KERNEL_CS, func),
                .flags = CALLBACKF_mask_events,
        };

        return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
        int ret;
        unsigned sysenter_feature;

#ifdef CONFIG_X86_32
        sysenter_feature = X86_FEATURE_SEP;
#else
        sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

        if (!boot_cpu_has(sysenter_feature))
                return;

        ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
        if (ret != 0)
                setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
        int ret;

        ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
        if (ret != 0) {
                printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
                /* Pretty fatal; 64-bit userspace has no other
                   mechanism for syscalls. */
        }

        if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
                ret = register_callback(CALLBACKTYPE_syscall32,
                                        xen_syscall32_target);
                if (ret != 0)
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
        }
#endif /* CONFIG_X86_64 */
}

void __init xen_pvmmu_arch_setup(void)
{
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

        HYPERVISOR_vm_assist(VMASST_CMD_enable,
                             VMASST_TYPE_pae_extended_cr3);

        if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
                BUG();

        xen_enable_sysenter();
        xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
        xen_panic_handler_init();
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
        if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
                printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
                disable_acpi();
        }
#endif

        memcpy(boot_command_line, xen_start_info->cmd_line,
               MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
               COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

        /* Set up idle, making sure it calls safe_halt() pvop */
        disable_cpuidle();
        disable_cpufreq();
        WARN_ON(xen_set_default_idle());

        fiddle_vdso();
#ifdef CONFIG_NUMA
        numa_off = 1;
#endif
}