grant-table.c

/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/pgtable.h>
#include <asm/sync_bitops.h>

/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;

static union {
	struct grant_entry_v1 *v1;
	void *addr;
} gnttab_shared;

/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter is used to store the grant table address when the grant
	 * table is being set up, and nr_gframes is the number of frames to
	 * map for the grant table. Returning GNTST_okay means success;
	 * a negative value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the list of frames that were mapped in map_frames for the
	 * grant entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame of
	 * this grant entry to a domain for access or transfer. The ref
	 * parameter is the reference of the introduced grant entry, domid is
	 * the id of the granted domain, frame is the page frame to be
	 * granted, and flags is the status of the grant entry to be updated.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for access. The ref
	 * parameter is the reference of the grant entry whose access will be
	 * stopped; readonly is not used by this function. If the grant entry
	 * is currently mapped for reading or writing, return failure (== 0)
	 * directly and do not tear down the grant access. Otherwise, stop
	 * grant access for this entry and return success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to a domain for transfer. The ref
	 * parameter is the reference of the grant entry whose transfer will
	 * be stopped. If the transfer has not started, just reclaim the grant
	 * entry and return failure (== 0). Otherwise, wait for the transfer
	 * to complete and then return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry. The ref parameter is the
	 * reference of the queried grant entry; the return value is the
	 * status of that entry. The detailed status (writing/reading) can be
	 * obtained from the return value by bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
};

static struct gnttab_ops *gnttab_interface;
static int grant_table_version;
static int grefs_per_grant_frame;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

/*
 * Following applies to gnttab_update_entry_v1.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
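
/*
 * Example (illustrative sketch, not part of this file): a typical frontend
 * grants one of its own pages to a backend domain and later revokes the
 * grant. The frame-number helper (virt_to_mfn()) and backend_domid used
 * below are assumptions made only for this example.
 *
 *	void *page = (void *)__get_free_page(GFP_KERNEL);
 *	int ref = gnttab_grant_foreign_access(backend_domid,
 *					      virt_to_mfn(page), 0);
 *	if (ref < 0)
 *		goto err;	// no free grant entries
 *	// ... hand "ref" to the backend, e.g. via xenstore ...
 *	// When done, revoke the grant; the page is freed once it is unused:
 *	gnttab_end_foreign_access(ref, 0, (unsigned long)page);
 */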

static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(unsigned long);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);

static void gnttab_handle_deferred(unsigned long unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				__free_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			free_page(page);
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;
	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
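
/*
 * Example (illustrative sketch, not part of this file): pre-allocating a
 * batch of grant references and handing them out one at a time, as block
 * and net frontends typically do. NR_RING_PAGES, ring_page[], backend_domid
 * and virt_to_mfn() are assumptions made only for this example.
 *
 *	grant_ref_t gref_head;
 *	int i, ref;
 *
 *	if (gnttab_alloc_grant_references(NR_RING_PAGES, &gref_head) < 0)
 *		return -ENOSPC;
 *	for (i = 0; i < NR_RING_PAGES; i++) {
 *		ref = gnttab_claim_grant_reference(&gref_head);
 *		BUG_ON(ref < 0);	// cannot happen: batch was reserved
 *		gnttab_grant_foreign_access_ref(ref, backend_domid,
 *						virt_to_mfn(ring_page[i]), 0);
 *	}
 *	// Unused references can go back with gnttab_release_grant_reference()
 *	// and the whole chain is returned with gnttab_free_grant_references().
 */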

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
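
/*
 * Example (illustrative sketch, not part of this file): when
 * gnttab_alloc_grant_references() fails with -ENOSPC, a driver can ask to be
 * notified once enough references are free again and retry from there. The
 * callback runs with gnttab_list_lock held, so it should only kick deferred
 * work. The callback body, retry_work and RING_GREFS are hypothetical names
 * used only for this example.
 *
 *	static struct gnttab_free_callback gref_cb;
 *
 *	static void more_grefs_available(void *arg)
 *	{
 *		schedule_work(arg);	// retry the allocation from a work item
 *	}
 *
 *	if (gnttab_alloc_grant_references(RING_GREFS, &gref_head) < 0)
 *		gnttab_request_free_callback(&gref_cb, more_grefs_available,
 *					     &retry_work, RING_GREFS);
 *	// gnttab_cancel_free_callback(&gref_cb) removes a pending request.
 */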

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;

	BUG_ON(grefs_per_grant_frame == 0);

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_grant_frame;

	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	new_nr_glist_frames =
		(new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_grant_frame * nr_grant_frames;
	     i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int i;
	int ret;

	ret = alloc_xenballooned_pages(nr_pages, pages, false);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign) {
			gnttab_free_pages(nr_pages, pages);
			return -ENOMEM;
		}
		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL(gnttab_alloc_pages);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
	free_xenballooned_pages(nr_pages, pages);
}
EXPORT_SYMBOL(gnttab_free_pages);
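
/*
 * Example (illustrative sketch, not part of this file): a backend that wants
 * to map a frontend's granted page first allocates a suitable page with
 * gnttab_alloc_pages(), then maps the grant into it via gnttab_map_refs()
 * below. The grant reference (gref) and frontend_domid are assumptions made
 * only for this example.
 *
 *	struct page *page;
 *	struct gnttab_map_grant_ref op;
 *
 *	if (gnttab_alloc_pages(1, &page))
 *		return -ENOMEM;
 *	gnttab_set_map_op(&op, (unsigned long)pfn_to_kaddr(page_to_pfn(page)),
 *			  GNTMAP_host_map, gref, frontend_domid);
 *	// ... submit via gnttab_map_refs() and check op.status ...
 *	gnttab_free_pages(1, &page);	// when finished, after unmapping
 */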

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
			const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
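
/*
 * Example (illustrative sketch, not part of this file): copying data out of
 * a granted page with GNTTABOP_copy, as a backend might do instead of
 * mapping it. The grant reference, frontend_domid, local_buf, len and the
 * virt_to_mfn() helper are assumptions made only for this example.
 *
 *	struct gnttab_copy op = {
 *		.flags         = GNTCOPY_source_gref,
 *		.source.u.ref  = gref,
 *		.source.domid  = frontend_domid,
 *		.source.offset = 0,
 *		.dest.u.gmfn   = virt_to_mfn(local_buf),
 *		.dest.domid    = DOMID_SELF,
 *		.dest.offset   = 0,
 *		.len           = len,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);	// retries GNTST_eagain internally
 *	if (op.status != GNTST_okay)
 *		return -EIO;	// hypothetical error handling
 */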

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* Retry eagain maps */
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
						&map_ops[i].status, __func__);

		if (map_ops[i].status == GNTST_okay) {
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
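
/*
 * Example (illustrative sketch, not part of this file): tearing down grant
 * mappings asynchronously. The unmap is deferred while any of the pages is
 * still referenced elsewhere, and the caller is notified via the done()
 * callback. The callback body and the pre-filled unmap_ops/pages arrays are
 * assumptions made only for this example.
 *
 *	static void unmap_done(int result, struct gntab_unmap_queue_data *data)
 *	{
 *		if (result)
 *			pr_err("unmap failed: %d\n", result);
 *		gnttab_free_pages(data->count, data->pages);
 *	}
 *
 *	unmap_data.unmap_ops  = unmap_ops;  // filled via gnttab_set_unmap_op()
 *	unmap_data.kunmap_ops = NULL;
 *	unmap_data.pages      = pages;
 *	unmap_data.count      = nr_pages;
 *	unmap_data.done       = unmap_done;
 *	gnttab_unmap_refs_async(&unmap_data);
 */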

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as the frame list is initialized by the
	 * GNTTABOP_setup_table hypercall below.
	 */
	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static struct gnttab_ops gnttab_v1_ops = {
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access		= gnttab_query_foreign_access_v1,
};

static void gnttab_request_version(void)
{
	/* Only version 1 is used, which will always be available. */
	grant_table_version = 1;
	grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
	gnttab_interface = &gnttab_v1_ops;

	pr_info("Grant tables using version %d layout\n", grant_table_version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
				(unsigned long)xen_auto_xlat_grant_frames.vaddr);
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	BUG_ON(grefs_per_grant_frame == 0);

	cur = nr_grant_frames;
	extra = ((req_entries + (grefs_per_grant_frame-1)) /
		 grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames())
		return -ENOSPC;

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	BUG_ON(grefs_per_grant_frame == 0);
	max_nr_glist_frames = (max_nr_grant_frames *
			       grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
			      GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames);
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain())
		return 0;

	if (!xen_pv_domain())
		return -ENODEV;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);