grant-table.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361
  1. /******************************************************************************
  2. * grant_table.c
  3. *
  4. * Granting foreign access to our memory reservation.
  5. *
  6. * Copyright (c) 2005-2006, Christopher Clark
  7. * Copyright (c) 2004-2005, K A Fraser
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License version 2
  11. * as published by the Free Software Foundation; or, when distributed
  12. * separately from the Linux kernel or incorporated into other
  13. * software packages, subject to the following license:
  14. *
  15. * Permission is hereby granted, free of charge, to any person obtaining a copy
  16. * of this source file (the "Software"), to deal in the Software without
  17. * restriction, including without limitation the rights to use, copy, modify,
  18. * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  19. * and to permit persons to whom the Software is furnished to do so, subject to
  20. * the following conditions:
  21. *
  22. * The above copyright notice and this permission notice shall be included in
  23. * all copies or substantial portions of the Software.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  26. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  27. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  28. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  29. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  30. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  31. * IN THE SOFTWARE.
  32. */
  33. #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  34. #include <linux/bootmem.h>
  35. #include <linux/sched.h>
  36. #include <linux/mm.h>
  37. #include <linux/slab.h>
  38. #include <linux/vmalloc.h>
  39. #include <linux/uaccess.h>
  40. #include <linux/io.h>
  41. #include <linux/delay.h>
  42. #include <linux/hardirq.h>
  43. #include <linux/workqueue.h>
  44. #include <linux/ratelimit.h>
  45. #include <linux/moduleparam.h>
  46. #include <xen/xen.h>
  47. #include <xen/interface/xen.h>
  48. #include <xen/page.h>
  49. #include <xen/grant_table.h>
  50. #include <xen/interface/memory.h>
  51. #include <xen/hvc-console.h>
  52. #include <xen/swiotlb-xen.h>
  53. #include <xen/balloon.h>
  54. #ifdef CONFIG_X86
  55. #include <asm/xen/cpuid.h>
  56. #endif
  57. #include <asm/xen/hypercall.h>
  58. #include <asm/xen/interface.h>
  59. #include <asm/pgtable.h>
  60. #include <asm/sync_bitops.h>
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
/* Free-list terminator; also doubles as an "invalid ref" sentinel. */
#define GNTTAB_LIST_END 0xffffffff

/* Two-level array of free-list links, one cell per grant reference. */
static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
/* Protects the free list, free count and callback list below. */
static DEFINE_SPINLOCK(gnttab_list_lock);

/* Pre-mapped grant frames for auto-translated guests — see
 * gnttab_setup_auto_xlat_frames(). */
struct grant_frames xen_auto_xlat_grant_frames;
/* Requested grant interface version ("version=" module parameter). */
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

/* The shared grant table, viewed as v1 or v2 entries per the version. */
static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;
/* This is a structure of function pointers for grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Mapping a list of frames for storing grant entries. Frames parameter
	 * is used to store grant table address when grant table being setup,
	 * nr_gframes is the number of frames to map grant table. Returning
	 * GNTST_okay means success and negative value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release a list of frames which are mapped in map_frames for grant
	 * entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introducing a valid entry into the grant table, granting the frame of
	 * this grant entry to domain for accessing or transferring. Ref
	 * parameter is reference of this introduced grant entry, domid is id of
	 * granted domain, frame is the page frame to be granted, and flags is
	 * status of the grant entry to be updated.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to domain for accessing. Ref parameter is
	 * reference of a grant entry whose grant access will be stopped,
	 * readonly is not in use in this function. If the grant entry is
	 * currently mapped for reading or writing, just return failure(==0)
	 * directly and don't tear down the grant access. Otherwise, stop grant
	 * access for this entry and return success(==1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to domain for transfer. Ref parameter is
	 * reference of a grant entry whose grant transfer will be stopped. If
	 * transfer has not started, just reclaim the grant entry and return
	 * failure(==0). Otherwise, wait for the transfer to complete and then
	 * return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry. Ref parameter is reference of
	 * queried grant entry, return value is the status of queried entry.
	 * Detailed status(writing/reading) can be gotten from the return value
	 * by bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
};

/* Completion/result pair handed to deferred unmap-refs callbacks. */
struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};
/* Backend ops for the active grant interface version (v1 or v2). */
static const struct gnttab_ops *gnttab_interface;

/* This reflects status of grant entries, so act as a global value. */
static grant_status_t *grstatus;

/* Callbacks waiting for more free grant references. */
static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

/* Refs per page of the bookkeeping list / status entries per page. */
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

/* Locate the free-list link cell for @entry in the two-level list. */
static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}

/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
  150. static int get_free_entries(unsigned count)
  151. {
  152. unsigned long flags;
  153. int ref, rc = 0;
  154. grant_ref_t head;
  155. spin_lock_irqsave(&gnttab_list_lock, flags);
  156. if ((gnttab_free_count < count) &&
  157. ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
  158. spin_unlock_irqrestore(&gnttab_list_lock, flags);
  159. return rc;
  160. }
  161. ref = head = gnttab_free_head;
  162. gnttab_free_count -= count;
  163. while (count-- > 1)
  164. head = gnttab_entry(head);
  165. gnttab_free_head = gnttab_entry(head);
  166. gnttab_entry(head) = GNTTAB_LIST_END;
  167. spin_unlock_irqrestore(&gnttab_list_lock, flags);
  168. return ref;
  169. }
  170. static void do_free_callbacks(void)
  171. {
  172. struct gnttab_free_callback *callback, *next;
  173. callback = gnttab_free_callback_list;
  174. gnttab_free_callback_list = NULL;
  175. while (callback != NULL) {
  176. next = callback->next;
  177. if (gnttab_free_count >= callback->count) {
  178. callback->next = NULL;
  179. callback->fn(callback->arg);
  180. } else {
  181. callback->next = gnttab_free_callback_list;
  182. gnttab_free_callback_list = callback;
  183. }
  184. callback = next;
  185. }
  186. }
  187. static inline void check_free_callbacks(void)
  188. {
  189. if (unlikely(gnttab_free_callback_list))
  190. do_free_callbacks();
  191. }
/* Return @ref to the head of the global free list (IRQ-safe). */
static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	/* The freed entry may satisfy a pending callback's request. */
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();	/* Flags become valid only after domid/frame are visible. */
	gnttab_shared.v1[ref].flags = flags;
}
static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	/*
	 * NOTE(review): unlike the v1 variant, GTF_permit_access is OR'd in
	 * here even though gnttab_grant_foreign_access_ref() already passes
	 * it; transfer grants therefore also carry GTF_permit_access with
	 * v2 — confirm this is intended before changing either variant.
	 */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}
  229. /*
  230. * Public grant-issuing interface functions
  231. */
  232. void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
  233. unsigned long frame, int readonly)
  234. {
  235. gnttab_interface->update_entry(ref, domid, frame,
  236. GTF_permit_access | (readonly ? GTF_readonly : 0));
  237. }
  238. EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
  239. int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
  240. int readonly)
  241. {
  242. int ref;
  243. ref = get_free_entries(1);
  244. if (unlikely(ref < 0))
  245. return -ENOSPC;
  246. gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
  247. return ref;
  248. }
  249. EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
/* v1: the in-use status bits live in the shared entry's flags word. */
static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

/* v2: the in-use status bits live in the separate status array. */
static int gnttab_query_foreign_access_v2(grant_ref_t ref)
{
	return grstatus[ref] & (GTF_reading|GTF_writing);
}
/* Nonzero iff the remote domain currently has @ref mapped (read/write). */
int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
/*
 * v1: atomically clear the entry's flags, but only if the peer is not
 * currently mapping it.  Returns 1 on success, 0 if still in use.
 */
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		/* Peer still has the frame mapped: refuse to revoke. */
		if (flags & (GTF_reading|GTF_writing))
			return 0;
		/* Retry if the hypervisor changed flags under us. */
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}
/*
 * v2: clear the entry's flags first, then check the status array to see
 * whether the peer still had it mapped.  Returns 1 on success, 0 if the
 * grant is still in use (flags are left cleared either way).
 */
static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}
/* Dispatch to the version-specific end-access implementation. */
static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}
  301. int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
  302. {
  303. if (_gnttab_end_foreign_access_ref(ref, readonly))
  304. return 1;
  305. pr_warn("WARNING: g.e. %#x still in use!\n", ref);
  306. return 0;
  307. }
  308. EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
/* A grant reference whose revocation failed and is retried from a timer. */
struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;	/* the still-in-use grant reference */
	bool ro;		/* was the grant read-only? */
	uint16_t warn_delay;	/* retries until next "still pending" log */
	struct page *page;	/* page to free once revoked, or NULL */
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
/*
 * Timer worker: retry revoking grant references that were still in use
 * when gnttab_end_foreign_access() ran.  Handles at most 10 entries per
 * invocation and re-arms itself (one-second period) while any remain.
 */
static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;	/* bound the work done per timer tick */
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		/* Wrapped around to the first still-busy entry: stop. */
		if (entry == first)
			break;
		list_del(&entry->list);
		/* Drop the lock around the actual revocation attempt. */
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				__free_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;	/* handled: do not requeue */
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			/* Still busy: back to the tail for the next pass. */
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
/*
 * Queue @ref (and optionally @page) for deferred revocation because the
 * remote domain still has it mapped.  If the tracking entry cannot be
 * allocated, the reference and page are deliberately leaked — safer than
 * recycling memory another domain can still write to.
 */
static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;	/* ~one warning per minute */
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	/*
	 * NOTE(review): the KERN_* level prefix is supplied through "%s"
	 * rather than in the format literal; newer printk implementations
	 * may not honour the level this way — verify.
	 */
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}
/*
 * Revoke grant @ref and recycle it, freeing the backing @page (a kernel
 * virtual address, or 0 for none).  If the peer still has the grant
 * mapped, revocation is deferred via gnttab_add_deferred() so the page
 * is not reused while a foreign domain can still access it.
 */
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			free_page(page);
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
  396. int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
  397. {
  398. int ref;
  399. ref = get_free_entries(1);
  400. if (unlikely(ref < 0))
  401. return -ENOSPC;
  402. gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
  403. return ref;
  404. }
  405. EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
/* Fill in @ref so @domid may transfer a frame into @pfn's slot. */
void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
/*
 * v1: finish (or cancel) a page transfer on @ref.  Returns the
 * transferred frame number, or 0 if the transfer had not started and
 * the entry was reclaimed instead.
 */
static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}
/*
 * v2 counterpart of gnttab_end_foreign_transfer_ref_v1(); only the
 * shared-entry field layout differs.
 */
static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}
/* Returns the transferred frame, or 0 if the transfer never started. */
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
  467. unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
  468. {
  469. unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
  470. put_free_entry(ref);
  471. return frame;
  472. }
  473. EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
/* Return a single, no-longer-granted reference to the free list. */
void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
  479. void gnttab_free_grant_references(grant_ref_t head)
  480. {
  481. grant_ref_t ref;
  482. unsigned long flags;
  483. int count = 1;
  484. if (head == GNTTAB_LIST_END)
  485. return;
  486. spin_lock_irqsave(&gnttab_list_lock, flags);
  487. ref = head;
  488. while (gnttab_entry(ref) != GNTTAB_LIST_END) {
  489. ref = gnttab_entry(ref);
  490. count++;
  491. }
  492. gnttab_entry(ref) = gnttab_free_head;
  493. gnttab_free_head = head;
  494. gnttab_free_count += count;
  495. check_free_callbacks();
  496. spin_unlock_irqrestore(&gnttab_list_lock, flags);
  497. }
  498. EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
  499. int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
  500. {
  501. int h = get_free_entries(count);
  502. if (h < 0)
  503. return -ENOSPC;
  504. *head = h;
  505. return 0;
  506. }
  507. EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
/* Nonzero iff the caller's private reference chain is exhausted. */
int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
  513. int gnttab_claim_grant_reference(grant_ref_t *private_head)
  514. {
  515. grant_ref_t g = *private_head;
  516. if (unlikely(g == GNTTAB_LIST_END))
  517. return -ENOSPC;
  518. *private_head = gnttab_entry(g);
  519. return g;
  520. }
  521. EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
/* Push @release back onto the caller's private reference chain. */
void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
/*
 * Register @callback to fire once at least @count grant references are
 * free.  No-op if already queued.  The callback runs with
 * gnttab_list_lock held (see do_free_callbacks()), so fn must not sleep.
 */
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	/* The request may already be satisfiable. */
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
  552. void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
  553. {
  554. struct gnttab_free_callback **pcb;
  555. unsigned long flags;
  556. spin_lock_irqsave(&gnttab_list_lock, flags);
  557. for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
  558. if (*pcb == callback) {
  559. *pcb = callback->next;
  560. break;
  561. }
  562. }
  563. spin_unlock_irqrestore(&gnttab_list_lock, flags);
  564. }
  565. EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
  566. static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
  567. {
  568. return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
  569. align;
  570. }
/*
 * Extend the free-list bookkeeping by @more_frames grant frames' worth
 * of entries and chain the new entries onto the free list.  Appears to
 * run with gnttab_list_lock held (note the GFP_ATOMIC allocations) —
 * confirm at the gnttab_expand() call sites.
 */
static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	BUG_ON(gnttab_interface == NULL);
	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	/* Allocate any extra pages the two-level list now needs. */
	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	/* Chain the new entries together... */
	for (i = grefs_per_frame * nr_grant_frames;
	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	/* ...and splice the chain onto the front of the free list. */
	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	/* Unwind only the pages allocated by this call. */
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}
  601. static unsigned int __max_nr_grant_frames(void)
  602. {
  603. struct gnttab_query_size query;
  604. int rc;
  605. query.dom = DOMID_SELF;
  606. rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
  607. if ((rc < 0) || (query.status != GNTST_okay))
  608. return 4; /* Legacy max supported number of frames */
  609. return query.max_nr_frames;
  610. }
  611. unsigned int gnttab_max_grant_frames(void)
  612. {
  613. unsigned int xen_max = __max_nr_grant_frames();
  614. static unsigned int boot_max_nr_grant_frames;
  615. /* First time, initialize it properly. */
  616. if (!boot_max_nr_grant_frames)
  617. boot_max_nr_grant_frames = __max_nr_grant_frames();
  618. if (xen_max > boot_max_nr_grant_frames)
  619. return boot_max_nr_grant_frames;
  620. return xen_max;
  621. }
  622. EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
/*
 * Map the grant-table region at physical address @addr for
 * auto-translated guests and record it in xen_auto_xlat_grant_frames.
 * Returns -EINVAL if already set up, -ENOMEM on mapping or allocation
 * failure.  Undo with gnttab_free_auto_xlat_frames().
 */
int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	/* Only one mapping is allowed at a time. */
	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	/* The frames are physically contiguous starting at @addr. */
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
  650. void gnttab_free_auto_xlat_frames(void)
  651. {
  652. if (!xen_auto_xlat_grant_frames.count)
  653. return;
  654. kfree(xen_auto_xlat_grant_frames.pfn);
  655. xen_unmap(xen_auto_xlat_grant_frames.vaddr);
  656. xen_auto_xlat_grant_frames.pfn = NULL;
  657. xen_auto_xlat_grant_frames.count = 0;
  658. xen_auto_xlat_grant_frames.vaddr = NULL;
  659. }
  660. EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 *
 * Pages come from the Xen balloon.  When BITS_PER_LONG < 64, a
 * struct xen_page_foreign is allocated per page and stashed in
 * page_private(); PagePrivate marks pages set up by this function.
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int i;
	int ret;

	ret = alloc_xenballooned_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign) {
			/* Releases the pages already initialised, too. */
			gnttab_free_pages(nr_pages, pages);
			return -ENOMEM;
		}
		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL(gnttab_alloc_pages);
/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		/* Only fully set-up pages carry private tracking data. */
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
	free_xenballooned_pages(nr_pages, pages);
}
EXPORT_SYMBOL(gnttab_free_pages);
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256

/*
 * Re-issue a single grant-table op while it keeps returning
 * GNTST_eagain (target frame paged out), sleeping with a linearly
 * growing delay between attempts.  Gives up once the delay reaches
 * MAX_DELAY and reports GNTST_bad_page in *status.
 */
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}
  724. void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
  725. {
  726. struct gnttab_map_grant_ref *op;
  727. if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
  728. BUG();
  729. for (op = batch; op < batch + count; op++)
  730. if (op->status == GNTST_eagain)
  731. gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
  732. &op->status, __func__);
  733. }
  734. EXPORT_SYMBOL_GPL(gnttab_batch_map);
  735. void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
  736. {
  737. struct gnttab_copy *op;
  738. if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
  739. BUG();
  740. for (op = batch; op < batch + count; op++)
  741. if (op->status == GNTST_eagain)
  742. gnttab_retry_eagain_gop(GNTTABOP_copy, op,
  743. &op->status, __func__);
  744. }
  745. EXPORT_SYMBOL_GPL(gnttab_batch_copy);
  746. void gnttab_foreach_grant_in_range(struct page *page,
  747. unsigned int offset,
  748. unsigned int len,
  749. xen_grant_fn_t fn,
  750. void *data)
  751. {
  752. unsigned int goffset;
  753. unsigned int glen;
  754. unsigned long xen_pfn;
  755. len = min_t(unsigned int, PAGE_SIZE - offset, len);
  756. goffset = xen_offset_in_page(offset);
  757. xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
  758. while (len) {
  759. glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
  760. fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
  761. goffset = 0;
  762. xen_pfn++;
  763. len -= glen;
  764. }
  765. }
  766. EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
  767. void gnttab_foreach_grant(struct page **pages,
  768. unsigned int nr_grefs,
  769. xen_grant_fn_t fn,
  770. void *data)
  771. {
  772. unsigned int goffset = 0;
  773. unsigned long xen_pfn = 0;
  774. unsigned int i;
  775. for (i = 0; i < nr_grefs; i++) {
  776. if ((i % XEN_PFN_PER_PAGE) == 0) {
  777. xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
  778. goffset = 0;
  779. }
  780. fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
  781. goffset += XEN_PAGE_SIZE;
  782. xen_pfn++;
  783. }
  784. }
/*
 * Map a batch of foreign grant references.  For every op that succeeds
 * the page is flagged as foreign and the granting domain/gref recorded,
 * then the p2m mappings are installed.  Returns a hypercall error, or
 * the result of set_foreign_p2m_mapping().
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* Retry eagain maps */
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
						&map_ops[i].status, __func__);

		if (map_ops[i].status == GNTST_okay) {
			struct xen_page_foreign *foreign;

			/* Record the foreign owner so the page can later be
			 * recognised as grant-mapped and unmapped correctly.
			 */
			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
  809. int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
  810. struct gnttab_unmap_grant_ref *kunmap_ops,
  811. struct page **pages, unsigned int count)
  812. {
  813. unsigned int i;
  814. int ret;
  815. ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
  816. if (ret)
  817. return ret;
  818. for (i = 0; i < count; i++)
  819. ClearPageForeign(pages[i]);
  820. return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
  821. }
  822. EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
/* Base retry delay (ms) for the deferred-unmap work; scaled by age. */
#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

/*
 * Delayed-work handler: bump the retry counter (saturating at UINT_MAX
 * to avoid wrapping the backoff back to a short delay) and try the
 * async unmap again.
 */
static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}
  835. static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
  836. {
  837. int ret;
  838. int pc;
  839. for (pc = 0; pc < item->count; pc++) {
  840. if (page_count(item->pages[pc]) > 1) {
  841. unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
  842. schedule_delayed_work(&item->gnttab_work,
  843. msecs_to_jiffies(delay));
  844. return;
  845. }
  846. }
  847. ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
  848. item->pages, item->count);
  849. item->done(ret, item);
  850. }
  851. void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
  852. {
  853. INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
  854. item->age = 0;
  855. __gnttab_unmap_refs_async(item);
  856. }
  857. EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
  858. static void unmap_refs_callback(int result,
  859. struct gntab_unmap_queue_data *data)
  860. {
  861. struct unmap_refs_callback_data *d = data->data;
  862. d->result = result;
  863. complete(&d->completion);
  864. }
  865. int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
  866. {
  867. struct unmap_refs_callback_data data;
  868. init_completion(&data.completion);
  869. item->data = &data;
  870. item->done = &unmap_refs_callback;
  871. gnttab_unmap_refs_async(item);
  872. wait_for_completion(&data.completion);
  873. return data.result;
  874. }
  875. EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
/* Number of status frames needed for @nr_grant_frames grant frames
 * (grant table v2 only).  Valid only after gnttab_request_version()
 * has selected gnttab_interface.
 */
static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	BUG_ON(gnttab_interface == NULL);
	return gnttab_frames(nr_grant_frames, SPP);
}
  881. static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
  882. {
  883. int rc;
  884. rc = arch_gnttab_map_shared(frames, nr_gframes,
  885. gnttab_max_grant_frames(),
  886. &gnttab_shared.addr);
  887. BUG_ON(rc);
  888. return 0;
  889. }
/* Tear down the shared-frame mapping of a v1 grant table. */
static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}
/*
 * Map the frames of a v2 grant table: first query and map the status
 * frames via GNTTABOP_get_status_frames, then map the shared grant
 * frames.  Returns -ENOMEM on allocation failure, -ENOSYS when the
 * hypervisor lacks the status-frames op; any other hypercall or
 * mapping failure is fatal (BUG).
 */
static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}
/* Tear down both the shared-frame and status-frame mappings of a v2
 * grant table.
 */
static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}
  933. static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
  934. {
  935. struct gnttab_setup_table setup;
  936. xen_pfn_t *frames;
  937. unsigned int nr_gframes = end_idx + 1;
  938. int rc;
  939. if (xen_feature(XENFEAT_auto_translated_physmap)) {
  940. struct xen_add_to_physmap xatp;
  941. unsigned int i = end_idx;
  942. rc = 0;
  943. BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
  944. /*
  945. * Loop backwards, so that the first hypercall has the largest
  946. * index, ensuring that the table will grow only once.
  947. */
  948. do {
  949. xatp.domid = DOMID_SELF;
  950. xatp.idx = i;
  951. xatp.space = XENMAPSPACE_grant_table;
  952. xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
  953. rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
  954. if (rc != 0) {
  955. pr_warn("grant table add_to_physmap failed, err=%d\n",
  956. rc);
  957. break;
  958. }
  959. } while (i-- > start_idx);
  960. return rc;
  961. }
  962. /* No need for kzalloc as it is initialized in following hypercall
  963. * GNTTABOP_setup_table.
  964. */
  965. frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
  966. if (!frames)
  967. return -ENOMEM;
  968. setup.dom = DOMID_SELF;
  969. setup.nr_frames = nr_gframes;
  970. set_xen_guest_handle(setup.frame_list, frames);
  971. rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
  972. if (rc == -ENOSYS) {
  973. kfree(frames);
  974. return -ENOSYS;
  975. }
  976. BUG_ON(rc || setup.status);
  977. rc = gnttab_interface->map_frames(frames, nr_gframes);
  978. kfree(frames);
  979. return rc;
  980. }
/* Operations for a version-1 grant table (32-bit frame numbers, no
 * separate status frames).
 */
static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access		= gnttab_query_foreign_access_v1,
};
/* Operations for a version-2 grant table (larger entries, so fewer
 * grefs per frame; status lives in separately mapped status frames).
 */
static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
	.query_foreign_access		= gnttab_query_foreign_access_v2,
};
/*
 * Decide whether the v2 grant-table layout is required, i.e. whether
 * frame numbers may exceed what a v1 entry's 32-bit frame field can
 * express (machine address width > 32 + PAGE_SHIFT).
 */
static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		/* Xen CPUID leaf base+5 reports the machine address width;
		 * older hypervisors with fewer leaves don't expose it.
		 */
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	/* Non-PV (or non-x86): need v2 if any pfn can exceed 32 bits. */
	return !!(max_possible_pfn >> 32);
}
  1018. static void gnttab_request_version(void)
  1019. {
  1020. long rc;
  1021. struct gnttab_set_version gsv;
  1022. if (gnttab_need_v2())
  1023. gsv.version = 2;
  1024. else
  1025. gsv.version = 1;
  1026. /* Boot parameter overrides automatic selection. */
  1027. if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
  1028. gsv.version = xen_gnttab_version;
  1029. rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
  1030. if (rc == 0 && gsv.version == 2)
  1031. gnttab_interface = &gnttab_v2_ops;
  1032. else
  1033. gnttab_interface = &gnttab_v1_ops;
  1034. pr_info("Grant tables using version %d layout\n",
  1035. gnttab_interface->version);
  1036. }
  1037. static int gnttab_setup(void)
  1038. {
  1039. unsigned int max_nr_gframes;
  1040. max_nr_gframes = gnttab_max_grant_frames();
  1041. if (max_nr_gframes < nr_grant_frames)
  1042. return -ENOSYS;
  1043. if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
  1044. gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
  1045. if (gnttab_shared.addr == NULL) {
  1046. pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
  1047. (unsigned long)xen_auto_xlat_grant_frames.vaddr);
  1048. return -ENOMEM;
  1049. }
  1050. }
  1051. return gnttab_map(0, nr_grant_frames - 1);
  1052. }
/* Re-negotiate the table version and remap the grant frames after a
 * suspend/resume cycle.
 */
int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}
  1058. int gnttab_suspend(void)
  1059. {
  1060. if (!xen_feature(XENFEAT_auto_translated_physmap))
  1061. gnttab_interface->unmap_frames();
  1062. return 0;
  1063. }
  1064. static int gnttab_expand(unsigned int req_entries)
  1065. {
  1066. int rc;
  1067. unsigned int cur, extra;
  1068. BUG_ON(gnttab_interface == NULL);
  1069. cur = nr_grant_frames;
  1070. extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
  1071. gnttab_interface->grefs_per_grant_frame);
  1072. if (cur + extra > gnttab_max_grant_frames()) {
  1073. pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
  1074. " cur=%u extra=%u limit=%u"
  1075. " gnttab_free_count=%u req_entries=%u\n",
  1076. cur, extra, gnttab_max_grant_frames(),
  1077. gnttab_free_count, req_entries);
  1078. return -ENOSPC;
  1079. }
  1080. rc = gnttab_map(cur, cur + extra - 1);
  1081. if (rc == 0)
  1082. rc = grow_gnttab_list(extra);
  1083. return rc;
  1084. }
  1085. int gnttab_init(void)
  1086. {
  1087. int i;
  1088. unsigned long max_nr_grant_frames;
  1089. unsigned int max_nr_glist_frames, nr_glist_frames;
  1090. unsigned int nr_init_grefs;
  1091. int ret;
  1092. gnttab_request_version();
  1093. max_nr_grant_frames = gnttab_max_grant_frames();
  1094. nr_grant_frames = 1;
  1095. /* Determine the maximum number of frames required for the
  1096. * grant reference free list on the current hypervisor.
  1097. */
  1098. BUG_ON(gnttab_interface == NULL);
  1099. max_nr_glist_frames = (max_nr_grant_frames *
  1100. gnttab_interface->grefs_per_grant_frame / RPP);
  1101. gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
  1102. GFP_KERNEL);
  1103. if (gnttab_list == NULL)
  1104. return -ENOMEM;
  1105. nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
  1106. for (i = 0; i < nr_glist_frames; i++) {
  1107. gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
  1108. if (gnttab_list[i] == NULL) {
  1109. ret = -ENOMEM;
  1110. goto ini_nomem;
  1111. }
  1112. }
  1113. ret = arch_gnttab_init(max_nr_grant_frames,
  1114. nr_status_frames(max_nr_grant_frames));
  1115. if (ret < 0)
  1116. goto ini_nomem;
  1117. if (gnttab_setup() < 0) {
  1118. ret = -ENODEV;
  1119. goto ini_nomem;
  1120. }
  1121. nr_init_grefs = nr_grant_frames *
  1122. gnttab_interface->grefs_per_grant_frame;
  1123. for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
  1124. gnttab_entry(i) = i + 1;
  1125. gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
  1126. gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
  1127. gnttab_free_head = NR_RESERVED_ENTRIES;
  1128. printk("Grant table initialized\n");
  1129. return 0;
  1130. ini_nomem:
  1131. for (i--; i >= 0; i--)
  1132. free_page((unsigned long)gnttab_list[i]);
  1133. kfree(gnttab_list);
  1134. return ret;
  1135. }
  1136. EXPORT_SYMBOL_GPL(gnttab_init);
/* Initcall wrapper: initialise grant tables for Xen domains, except
 * plain HVM guests whose initialisation is deferred (done later from
 * platform-pci code, not here).
 */
static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);