/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"
#include "p2m.h"
#include "mmu.h"

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
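/*
 * Note: REMAP_SIZE is P2M_PER_PAGE minus the three bookkeeping longs of
 * xen_remap_buf (next_area_mfn, target_pfn, size), so the structure below
 * fills exactly one page.
 */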
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

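/*
 * Record a memory range in the xen_extra_mem table, either by starting a
 * new entry or by extending an adjacent existing one, and reserve it in
 * memblock so the (not yet populated) range is not handed out by the
 * allocator.
 */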
static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);
}

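/*
 * Remove a range from the xen_extra_mem table: trim the matching entry at
 * its start or end, or split it in the middle via xen_add_extra_mem(), and
 * release the corresponding memblock reservation.
 */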
static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;
	phys_addr_t start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start;
		size_r = xen_extra_mem[i].size;

		/* Start of region. */
		if (start_r == start) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].start += size;
			xen_extra_mem[i].size -= size;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start + size) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].size -= size;
			break;
		}
		/* Mid of region. */
		if (start > start_r && start < start_r + size_r) {
			BUG_ON(start + size > start_r + size_r);
			xen_extra_mem[i].size = start - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start + size, start_r + size_r -
					  (start + size));
			break;
		}
	}
	memblock_free(start, size);
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;
	phys_addr_t addr = PFN_PHYS(pfn);

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (addr >= xen_extra_mem[i].start &&
		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].size)
			continue;
		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(
	const struct e820entry *list, size_t map_size,
	unsigned long *min_pfn)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn < *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

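/*
 * Hand a single frame back to the hypervisor via XENMEM_decrease_reservation.
 * Returns the number of extents released, i.e. 1 on success.
 */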
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			(*released)++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a linked list of buffer pages,
 * each holding up to REMAP_SIZE MFNs and the start target PFN for doing the
 * remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	const struct e820entry *list, size_t map_size, unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
	unsigned long *released, unsigned long *remapped)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(list, map_size,
						      &remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
				cur_pfn + left, nr_pages, released);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
		*remapped += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

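/*
 * Walk the supplied E820 map and, for each run of non-RAM pages (including
 * any gaps between entries), establish the 1:1 mapping and arrange for the
 * backing RAM to be remapped to usable pfns at or above nr_pages via
 * xen_set_identity_and_remap_chunk().
 */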
static void __init xen_set_identity_and_remap(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages,
	unsigned long *released, unsigned long *remapped)
{
	phys_addr_t start = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry;
	unsigned long num_released = 0;
	unsigned long num_remapped = 0;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						list, map_size, start_pfn,
						end_pfn, nr_pages, last_pfn,
						&num_released, &num_remapped);
			start = end;
		}
	}

	*released = num_released;
	*remapped = num_remapped;

	pr_info("Released %ld page(s)\n", num_released);
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained
 * in the to-be-remapped memory itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows remapping the different chunks in
 * arbitrary order while the resulting mapping is independent of the order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}

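/*
 * Add a region to the kernel's e820 map, trimming RAM regions to whole
 * pages so that partial pages at either end are not treated as usable RAM.
 */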
static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

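/*
 * Treat E820_UNUSABLE regions as RAM; see the comment in xen_memory_setup()
 * for why this is needed when using the machine memory map in dom0.
 */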
static void __init xen_ignore_unusable(struct e820entry *list, size_t map_size)
{
	struct e820entry *entry;
	unsigned int i;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	phys_addr_t mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	unsigned long remapped_pages;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable(map, memmap.nr_entries);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
				   &xen_released_pages, &remapped_pages);

	extra_pages += xen_released_pages;
	extra_pages += remapped_pages;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size. On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
	i = 0;
	while (i < memmap.nr_entries) {
		phys_addr_t addr = map[i].addr;
		phys_addr_t size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, PFN_PHYS(extra_pages));
				extra_pages -= PFN_DOWN(size);
				xen_add_extra_mem(addr, size);
				xen_max_p2m_pfn = PFN_DOWN(addr + size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 *
	 * PFNs above MAX_P2M_PFN are considered identity mapped as
	 * well.
	 */
	set_phys_range_identity(map[i-1].addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 * We tried to make the memblock_reserve more selective so
	 * that it would be clear what region is reserved. Sadly we ran
	 * into the problem wherein on a 64-bit hypervisor with a 32-bit
	 * initial domain, the pt_base has the cr3 value which is not
	 * necessarily where the pagetable starts! As Jan put it: "
	 * Actually, the adjustment turns out to be correct: The page
	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
	 * "first L2", "first L3", so the offset to the page table base is
	 * indeed 2. When reading xen/include/public/xen.h's comment
	 * very strictly, this is not a violation (since there nothing is said
	 * that the first thing in the page table space is pointed to by
	 * pt_base; I admit that this seems to be implied though, namely
	 * do I think that it is implied that the page table space is the
	 * range [pt_base, pt_base + nt_pt_frames), whereas that
	 * range here indeed is [pt_base - 2, pt_base - 2 + nt_pt_frames),
	 * which - without a priori knowledge - the kernel would have
	 * difficulty to figure out)." - so let's just fall back to the
	 * easy way and reserve the whole region.
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);

	for (i = 0; i < memmap.nr_entries; i++)
		e820_add_region(map[i].addr, map[i].size, map[i].type);

	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images. vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	u32 *mask;
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

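/*
 * Register an event/exception callback entry point with the hypervisor,
 * using the kernel code segment and with events masked on entry.
 */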
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

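/*
 * Register the sysenter callback if the CPU supports it (SEP on 32-bit,
 * SYSENTER32 in compat mode on 64-bit); if registration fails, clear the
 * feature bit again.
 */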
void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

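/*
 * Register the 64-bit (and, where supported, 32-bit compat) syscall
 * callbacks. A failure of the 64-bit registration is only reported here,
 * even though 64-bit userspace has no other way to make system calls.
 */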
void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

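/*
 * PV-MMU specific boot setup: enable the vm_assist features the kernel
 * relies on and register the event and failsafe callbacks.
 */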
void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}