xen-blkfront.c
/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>
/*
 * The minimal size of a segment supported by the block framework is PAGE_SIZE.
 * When Linux is using a different page size than Xen, it may not be possible
 * to put all the data in a single segment.
 * This can happen when the backend doesn't support indirect descriptors and
 * therefore the maximum amount of data that a request can carry is
 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 11 * 4KB = 44KB.
 *
 * Note that we only support one extra request. So the Linux page size
 * should be <= (2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
 * 88KB.
 */
#define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct grant {
	grant_ref_t gref;
	struct page *page;
	struct list_head node;
};

enum blk_req_status {
	REQ_WAITING,
	REQ_DONE,
	REQ_ERROR,
	REQ_EOPNOTSUPP,
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
	unsigned int num_sg;
	enum blk_req_status status;

	#define NO_ASSOCIATED_ID ~0UL
	/*
	 * Id of the sibling if we ever need 2 requests when handling a
	 * block I/O request
	 */
	unsigned long associated_id;
};

struct split_bio {
	struct bio *bio;
	atomic_t pending;
};

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

/*
 * Maximum number of segments in indirect requests; the actual value used by
 * the frontend driver is the minimum of this value and the value provided
 * by the backend driver.
 */
static unsigned int xen_blkif_max_segments = 32;
module_param_named(max_indirect_segments, xen_blkif_max_segments, uint,
		   S_IRUGO);
MODULE_PARM_DESC(max_indirect_segments,
		 "Maximum amount of segments in indirect requests (default is 32)");

static unsigned int xen_blkif_max_queues = 4;
module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO);
MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

#define BLK_RING_SIZE(info)	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)

#define BLK_MAX_RING_SIZE	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)

/*
 * ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
 * characters are enough. Define to 20 to keep consistent with backend.
 */
#define RINGREF_NAME_LEN (20)
/*
 * queue-%u would take 7 + 10(UINT_MAX) = 17 characters.
 */
#define QUEUE_NAME_LEN (17)

/*
 * Per-ring info.
 * Every blkfront device can associate with one or more blkfront_ring_info,
 * depending on how many hardware queues/rings are used.
 */
struct blkfront_ring_info {
	/* Lock to protect data in every ring buffer. */
	spinlock_t ring_lock;
	struct blkif_front_ring ring;
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	unsigned int evtchn, irq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_MAX_RING_SIZE];
	struct list_head indirect_pages;
	struct list_head grants;
	unsigned int persistent_gnts_c;
	unsigned long shadow_free;
	struct blkfront_info *dev_info;
};

/*
 * We have one of these per vbd, whether ide, scsi or 'other'. They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	u16 sector_size;
	unsigned int physical_sector_size;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	/* Number of pages per ring buffer. */
	unsigned int nr_ring_pages;
	struct request_queue *rq;
	unsigned int feature_flush;
	unsigned int feature_fua;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int feature_persistent:1;
	/* Number of 4KB segments handled */
	unsigned int max_indirect_segments;
	int is_ready;
	struct blk_mq_tag_set tag_set;
	struct blkfront_ring_info *rinfo;
	unsigned int nr_rings;
	/* Save incomplete reqs and bios for migration. */
	struct list_head requests;
	struct bio_list bio_list;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */

/*
 * Grants are always the same size as a Xen page (i.e. 4KB).
 * A physical segment is always the same size as a Linux page.
 * Number of grants per physical segment
 */
#define GRANTS_PER_PSEG	(PAGE_SIZE / XEN_PAGE_SIZE)

#define GRANTS_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))

#define PSEGS_PER_INDIRECT_FRAME	\
	(GRANTS_PER_INDIRECT_FRAME / GRANTS_PER_PSEG)

#define INDIRECT_GREFS(_grants)		\
	DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)

#define GREFS(_psegs)	((_psegs) * GRANTS_PER_PSEG)

static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
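
/*
 * The shadow[] array doubles as a free list of request IDs: each unused
 * entry's req.u.rw.id field stores the index of the next free entry, and
 * shadow_free holds the head. get_id_from_freelist() pops an index and
 * add_id_to_freelist() pushes one back, so allocation is O(1) and an ID
 * echoed by the backend can be validated before it is reused.
 */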
static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
	unsigned long free = rinfo->shadow_free;

	BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
	rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
	rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
			      unsigned long id)
{
	if (rinfo->shadow[id].req.u.rw.id != id)
		return -EINVAL;
	if (rinfo->shadow[id].request == NULL)
		return -EINVAL;
	rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
	rinfo->shadow[id].request = NULL;
	rinfo->shadow_free = id;
	return 0;
}

static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct page *granted_page;
	struct grant *gnt_list_entry, *n;
	int i = 0;

	while (i < num) {
		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
			granted_page = alloc_page(GFP_NOIO);
			if (!granted_page) {
				kfree(gnt_list_entry);
				goto out_of_memory;
			}
			gnt_list_entry->page = granted_page;
		}

		gnt_list_entry->gref = GRANT_INVALID_REF;
		list_add(&gnt_list_entry->node, &rinfo->grants);
		i++;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(gnt_list_entry, n,
				 &rinfo->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
			__free_page(gnt_list_entry->page);
		kfree(gnt_list_entry);
		i--;
	}
	BUG_ON(i != 0);
	return -ENOMEM;
}

static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry;

	BUG_ON(list_empty(&rinfo->grants));
	gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
					  node);
	list_del(&gnt_list_entry->node);

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		rinfo->persistent_gnts_c--;

	return gnt_list_entry;
}

static inline void grant_foreign_access(const struct grant *gnt_list_entry,
					const struct blkfront_info *info)
{
	gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
						 info->xbdev->otherend_id,
						 gnt_list_entry->page,
						 0);
}
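
/*
 * get_grant() and get_indirect_grant() pull an entry off rinfo->grants.
 * If the entry still carries a valid gref, it is a persistent grant the
 * backend kept mapped and can be reused as-is; otherwise a fresh grant
 * reference is claimed from the pre-allocated gref_head and access is
 * granted to the backend domain.
 */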
static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long gfn,
			       struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry = get_free_grant(rinfo);
	struct blkfront_info *info = rinfo->dev_info;

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (info->feature_persistent)
		grant_foreign_access(gnt_list_entry, info);
	else {
		/* Grant access to the GFN passed by the caller */
		gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
						info->xbdev->otherend_id,
						gfn, 0);
	}

	return gnt_list_entry;
}

static struct grant *get_indirect_grant(grant_ref_t *gref_head,
					struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry = get_free_grant(rinfo);
	struct blkfront_info *info = rinfo->dev_info;

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
		struct page *indirect_page;

		/* Fetch a pre-allocated page to use for indirect grefs */
		BUG_ON(list_empty(&rinfo->indirect_pages));
		indirect_page = list_first_entry(&rinfo->indirect_pages,
						 struct page, lru);
		list_del(&indirect_page->lru);
		gnt_list_entry->page = indirect_page;
	}
	grant_foreign_access(gnt_list_entry, info);

	return gnt_list_entry;
}

static const char *op_name(int op)
{
	static const char *const names[] = {
		[BLKIF_OP_READ] = "read",
		[BLKIF_OP_WRITE] = "write",
		[BLKIF_OP_WRITE_BARRIER] = "barrier",
		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
		[BLKIF_OP_DISCARD] = "discard" };

	if (op < 0 || op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;

	schedule_work(&rinfo->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;
	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;

		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}
	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
					    struct request *req,
					    struct blkif_request **ring_req)
{
	unsigned long id;

	*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
	rinfo->ring.req_prod_pvt++;

	id = get_id_from_freelist(rinfo);
	rinfo->shadow[id].request = req;
	rinfo->shadow[id].status = REQ_WAITING;
	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;

	(*ring_req)->u.rw.id = id;

	return id;
}
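
/*
 * Every slot placed on the shared ring is paired with a blk_shadow entry
 * keyed by the id above: the backend echoes only that id in its response,
 * and the shadow entry is what lets the interrupt handler map the response
 * back to the struct request (and, across migration, reissue the request
 * from the saved copy).
 */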
static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req;
	unsigned long id;

	/* Fill out a communications ring structure. */
	id = blkif_ring_get_request(rinfo, req, &ring_req);

	ring_req->operation = BLKIF_OP_DISCARD;
	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
	ring_req->u.discard.id = id;
	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
	if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
	else
		ring_req->u.discard.flag = 0;

	/* Keep a private copy so we can reissue requests when recovering. */
	rinfo->shadow[id].req = *ring_req;

	return 0;
}

struct setup_rw_req {
	unsigned int grant_idx;
	struct blkif_request_segment *segments;
	struct blkfront_ring_info *rinfo;
	struct blkif_request *ring_req;
	grant_ref_t gref_head;
	unsigned int id;
	/* Only used when persistent grant is used and it's a read request */
	bool need_copy;
	unsigned int bvec_off;
	char *bvec_data;

	bool require_extra_req;
	struct blkif_request *extra_ring_req;
};

static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
				     unsigned int len, void *data)
{
	struct setup_rw_req *setup = data;
	int n, ref;
	struct grant *gnt_list_entry;
	unsigned int fsect, lsect;
	/* Convenient aliases */
	unsigned int grant_idx = setup->grant_idx;
	struct blkif_request *ring_req = setup->ring_req;
	struct blkfront_ring_info *rinfo = setup->rinfo;
	/*
	 * We always use the shadow of the first request to store the list
	 * of grants associated with the block I/O request. This makes the
	 * completion easier to handle even if the block I/O request is
	 * split.
	 */
	struct blk_shadow *shadow = &rinfo->shadow[setup->id];

	if (unlikely(setup->require_extra_req &&
		     grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		/*
		 * We are using the second request; set up grant_idx
		 * to be the index into its segment array.
		 */
		grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
		ring_req = setup->extra_ring_req;
	}

	if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
	    (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
		if (setup->segments)
			kunmap_atomic(setup->segments);

		n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
		gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
		shadow->indirect_grants[n] = gnt_list_entry;
		setup->segments = kmap_atomic(gnt_list_entry->page);
		ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
	}

	gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
	ref = gnt_list_entry->gref;
	/*
	 * All the grants are stored in the shadow of the first
	 * request. Therefore we have to use the global index.
	 */
	shadow->grants_used[setup->grant_idx] = gnt_list_entry;

	if (setup->need_copy) {
		void *shared_data;

		shared_data = kmap_atomic(gnt_list_entry->page);
		/*
		 * this does not wipe data stored outside the
		 * range sg->offset..sg->offset+sg->length.
		 * Therefore, blkback *could* see data from
		 * previous requests. This is OK as long as
		 * persistent grants are shared with just one
		 * domain. It may need refactoring if this
		 * changes
		 */
		memcpy(shared_data + offset,
		       setup->bvec_data + setup->bvec_off,
		       len);

		kunmap_atomic(shared_data);
		setup->bvec_off += len;
	}

	fsect = offset >> 9;
	lsect = fsect + (len >> 9) - 1;
	if (ring_req->operation != BLKIF_OP_INDIRECT) {
		ring_req->u.rw.seg[grant_idx] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	} else {
		setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	}

	(setup->grant_idx)++;
}

static void blkif_setup_extra_req(struct blkif_request *first,
				  struct blkif_request *second)
{
	uint16_t nr_segments = first->u.rw.nr_segments;

	/*
	 * The second request is only present when the first request uses
	 * all its segments. It's always the continuation of the first one.
	 */
	first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
	second->u.rw.sector_number = first->u.rw.sector_number +
		(BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;

	second->u.rw.handle = first->u.rw.handle;
	second->operation = first->operation;
}
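
/*
 * Example (for illustration; the numbers assume a 64KB-page arm64 guest):
 * with 64KB Linux pages and 4KB Xen pages, one physical segment needs up
 * to 16 grants, but a plain ring request carries at most
 * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11 segments. If the backend also lacks
 * indirect descriptors, HAS_EXTRA_REQ is true and a single block-layer
 * request is split across two ring slots, which blkif_setup_extra_req()
 * stitches together as one contiguous transfer.
 */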
static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req, *extra_ring_req = NULL;
	unsigned long id, extra_id = NO_ASSOCIATED_ID;
	bool require_extra_req = false;
	int i;
	struct setup_rw_req setup = {
		.grant_idx = 0,
		.segments = NULL,
		.rinfo = rinfo,
		.need_copy = rq_data_dir(req) && info->feature_persistent,
	};

	/*
	 * Used to store if we are able to queue the request by just using
	 * existing persistent grants, or if we have to get new grants,
	 * as there are not sufficiently many free.
	 */
	struct scatterlist *sg;
	int num_sg, max_grefs, num_grant;

	max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(max_grefs);

	/*
	 * We have to reserve 'max_grefs' grants because persistent
	 * grants are shared by all rings.
	 */
	if (max_grefs > 0)
		if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
			gnttab_request_free_callback(
				&rinfo->callback,
				blkif_restart_queue_callback,
				rinfo,
				max_grefs);
			return 1;
		}

	/* Fill out a communications ring structure. */
	id = blkif_ring_get_request(rinfo, req, &ring_req);

	num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
	num_grant = 0;
	/* Calculate the number of grants used */
	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
		num_grant += gnttab_count_grant(sg->offset, sg->length);

	require_extra_req = info->max_indirect_segments == 0 &&
		num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
	BUG_ON(!HAS_EXTRA_REQ && require_extra_req);

	rinfo->shadow[id].num_sg = num_sg;
	if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
	    likely(!require_extra_req)) {
		/*
		 * The indirect operation can only be a BLKIF_OP_READ or
		 * BLKIF_OP_WRITE
		 */
		BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
		ring_req->operation = BLKIF_OP_INDIRECT;
		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.indirect.handle = info->handle;
		ring_req->u.indirect.nr_segments = num_grant;
	} else {
		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.rw.handle = info->handle;
		ring_req->operation = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
			/*
			 * Ideally we can do an unordered flush-to-disk.
			 * In case the backend only supports barriers, use
			 * that. A barrier request is a superset of FUA, so
			 * we can implement it the same way. (It's also a
			 * FLUSH+FUA, since it is guaranteed ordered WRT
			 * previous writes.)
			 */
			if (info->feature_flush && info->feature_fua)
				ring_req->operation =
					BLKIF_OP_WRITE_BARRIER;
			else if (info->feature_flush)
				ring_req->operation =
					BLKIF_OP_FLUSH_DISKCACHE;
			else
				ring_req->operation = 0;
		}
		ring_req->u.rw.nr_segments = num_grant;
		if (unlikely(require_extra_req)) {
			extra_id = blkif_ring_get_request(rinfo, req,
							  &extra_ring_req);
			/*
			 * Only the first request contains the scatter-gather
			 * list.
			 */
			rinfo->shadow[extra_id].num_sg = 0;

			blkif_setup_extra_req(ring_req, extra_ring_req);

			/* Link the 2 requests together */
			rinfo->shadow[extra_id].associated_id = id;
			rinfo->shadow[id].associated_id = extra_id;
		}
	}

	setup.ring_req = ring_req;
	setup.id = id;

	setup.require_extra_req = require_extra_req;
	if (unlikely(require_extra_req))
		setup.extra_ring_req = extra_ring_req;

	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
		BUG_ON(sg->offset + sg->length > PAGE_SIZE);

		if (setup.need_copy) {
			setup.bvec_off = sg->offset;
			setup.bvec_data = kmap_atomic(sg_page(sg));
		}

		gnttab_foreach_grant_in_range(sg_page(sg),
					      sg->offset,
					      sg->length,
					      blkif_setup_rw_req_grant,
					      &setup);

		if (setup.need_copy)
			kunmap_atomic(setup.bvec_data);
	}
	if (setup.segments)
		kunmap_atomic(setup.segments);

	/* Keep a private copy so we can reissue requests when recovering. */
	rinfo->shadow[id].req = *ring_req;
	if (unlikely(require_extra_req))
		rinfo->shadow[extra_id].req = *extra_ring_req;

	if (max_grefs > 0)
		gnttab_free_grant_references(setup.gref_head);

	return 0;
}

/*
 * Generate a Xen blkfront IO request from a blk layer request. Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
{
	if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (unlikely(req_op(req) == REQ_OP_DISCARD ||
		     req_op(req) == REQ_OP_SECURE_ERASE))
		return blkif_queue_discard_req(req, rinfo);
	else
		return blkif_queue_rw_req(req, rinfo);
}

static inline void flush_requests(struct blkfront_ring_info *rinfo)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);

	if (notify)
		notify_remote_via_irq(rinfo->irq);
}
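
/*
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() publishes req_prod and sets
 * 'notify' only when the backend may have stopped processing the ring
 * (judged from the req_event value the backend last advertised), so the
 * event channel is only kicked when the backend actually needs waking.
 */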
static inline bool blkif_request_flush_invalid(struct request *req,
					       struct blkfront_info *info)
{
	return ((req->cmd_type != REQ_TYPE_FS) ||
		((req_op(req) == REQ_OP_FLUSH) &&
		 !info->feature_flush) ||
		((req->cmd_flags & REQ_FUA) &&
		 !info->feature_fua));
}

static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *qd)
{
	unsigned long flags;
	int qid = hctx->queue_num;
	struct blkfront_info *info = hctx->queue->queuedata;
	struct blkfront_ring_info *rinfo = NULL;

	BUG_ON(info->nr_rings <= qid);
	rinfo = &info->rinfo[qid];
	blk_mq_start_request(qd->rq);
	spin_lock_irqsave(&rinfo->ring_lock, flags);
	if (RING_FULL(&rinfo->ring))
		goto out_busy;

	if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
		goto out_err;

	if (blkif_queue_request(qd->rq, rinfo))
		goto out_busy;

	flush_requests(rinfo);
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	return BLK_MQ_RQ_QUEUE_OK;

out_err:
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	return BLK_MQ_RQ_QUEUE_ERROR;

out_busy:
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	blk_mq_stop_hw_queue(hctx);
	return BLK_MQ_RQ_QUEUE_BUSY;
}
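
/*
 * Each blk-mq hardware queue maps 1:1 onto a blkfront_ring_info, so
 * hctx->queue_num selects the ring. Returning BLK_MQ_RQ_QUEUE_BUSY makes
 * blk-mq requeue the request; the hardware queue stays stopped until
 * kick_pending_request_queues() restarts it once ring space (or grant
 * references, via the gnttab free callback) becomes available again.
 */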
static struct blk_mq_ops blkfront_mq_ops = {
	.queue_rq = blkif_queue_rq,
};

static void blkif_set_queue_limits(struct blkfront_info *info)
{
	struct request_queue *rq = info->rq;
	struct gendisk *gd = info->gd;
	unsigned int segments = info->max_indirect_segments ? :
				BLKIF_MAX_SEGMENTS_PER_REQUEST;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, info->sector_size);
	blk_queue_physical_block_size(rq, info->physical_sector_size);
	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
}
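
/*
 * Worked example of the limits above: with the default 32 indirect
 * segments and 4KB Xen pages, max_hw_sectors is 32 * 4096 / 512 = 256
 * sectors, i.e. a 128KB cap per request; without indirect descriptors it
 * drops to 11 * 4096 / 512 = 88 sectors (44KB).
 */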
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
				unsigned int physical_sector_size)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	memset(&info->tag_set, 0, sizeof(info->tag_set));
	info->tag_set.ops = &blkfront_mq_ops;
	info->tag_set.nr_hw_queues = info->nr_rings;
	if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
		/*
		 * When indirect descriptors are not supported, the I/O
		 * request will be split between multiple requests in the
		 * ring. To avoid problems when sending the request, divide
		 * by 2 the depth of the queue.
		 */
		info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
	} else
		info->tag_set.queue_depth = BLK_RING_SIZE(info);
	info->tag_set.numa_node = NUMA_NO_NODE;
	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	info->tag_set.cmd_size = 0;
	info->tag_set.driver_data = info;

	if (blk_mq_alloc_tag_set(&info->tag_set))
		return -EINVAL;
	rq = blk_mq_init_queue(&info->tag_set);
	if (IS_ERR(rq)) {
		blk_mq_free_tag_set(&info->tag_set);
		return PTR_ERR(rq);
	}

	rq->queuedata = info;
	info->rq = gd->queue = rq;
	info->gd = gd;
	info->sector_size = sector_size;
	info->physical_sector_size = physical_sector_size;
	blkif_set_queue_limits(info);

	return 0;
}

static const char *flush_info(struct blkfront_info *info)
{
	if (info->feature_flush && info->feature_fua)
		return "barrier: enabled;";
	else if (info->feature_flush)
		return "flush diskcache: enabled;";
	else
		return "barrier or flush: disabled;";
}

static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
			      info->feature_fua ? true : false);
	pr_info("blkfront: %s: %s %s %s %s %s\n",
		info->gd->disk_name, flush_info(info),
		"persistent grants:", info->feature_persistent ?
		"enabled;" : "disabled;", "indirect descriptors:",
		info->max_indirect_segments ? "enabled;" : "disabled;");
}

static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;

	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
	case XEN_IDE0_MAJOR:
		*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = ((*minor / 64) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_IDE1_MAJOR:
		*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK0_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK1_MAJOR:
	case XEN_SCSI_DISK2_MAJOR:
	case XEN_SCSI_DISK3_MAJOR:
	case XEN_SCSI_DISK4_MAJOR:
	case XEN_SCSI_DISK5_MAJOR:
	case XEN_SCSI_DISK6_MAJOR:
	case XEN_SCSI_DISK7_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK8_MAJOR:
	case XEN_SCSI_DISK9_MAJOR:
	case XEN_SCSI_DISK10_MAJOR:
	case XEN_SCSI_DISK11_MAJOR:
	case XEN_SCSI_DISK12_MAJOR:
	case XEN_SCSI_DISK13_MAJOR:
	case XEN_SCSI_DISK14_MAJOR:
	case XEN_SCSI_DISK15_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XENVBD_MAJOR:
		*offset = *minor / PARTS_PER_DISK;
		break;
	default:
		printk(KERN_WARNING "blkfront: your disk configuration is "
		       "incorrect, please use an xvd device instead\n");
		return -ENODEV;
	}
	return 0;
}
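
/*
 * For instance, a guest configured with the emulated IDE name "hda"
 * (major XEN_IDE0_MAJOR, minor 0) translates to offset 0 and minor 0, so
 * it surfaces as /dev/xvda; "hdb" (minor 64) becomes offset 1 and minor
 * 16, i.e. /dev/xvdb, with PARTS_PER_DISK = 16 minors reserved per disk
 * for partitions.
 */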
static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}
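
/*
 * encode_disk_name() is the usual bijective base-26 scheme (like
 * spreadsheet column names): offset 0 -> "a", 25 -> "z", 26 -> "aa",
 * 27 -> "ab", giving disk names xvda..xvdz, xvdaa, and so on.
 */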
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size,
			       unsigned int physical_sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;
	char *ptr;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
			       "emulated IDE disks,\n\t choose an xvd device name "
			       "from xvde on\n", info->vdevice);
	}
	if (minor >> MINORBITS) {
		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
			info->vdevice, minor);
		return -ENODEV;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	strcpy(gd->disk_name, DEV_NAME);
	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
	if (nr_minors > 1)
		*ptr = 0;
	else
		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
			 "%d", minor & (nr_parts - 1));

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
		del_gendisk(gd);
		goto release;
	}

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

release:
	xlbd_release_minors(minor, nr_minors);
out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors, i;

	if (info->rq == NULL)
		return;

	/* No more blkif_request(). */
	blk_mq_stop_hw_queues(info->rq);

	for (i = 0; i < info->nr_rings; i++) {
		struct blkfront_ring_info *rinfo = &info->rinfo[i];

		/* No more gnttab callback work. */
		gnttab_cancel_free_callback(&rinfo->callback);

		/* Flush gnttab callback work. Must be done with no locks held. */
		flush_work(&rinfo->work);
	}

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	blk_mq_free_tag_set(&info->tag_set);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}
/* Already hold rinfo->ring_lock. */
static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
{
	if (!RING_FULL(&rinfo->ring))
		blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
}

static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&rinfo->ring_lock, flags);
	kick_pending_request_queues_locked(rinfo);
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);

	if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(rinfo);
}

static void blkif_free_ring(struct blkfront_ring_info *rinfo)
{
	struct grant *persistent_gnt, *n;
	struct blkfront_info *info = rinfo->dev_info;
	int i, j, segs;

	/*
	 * Remove indirect pages; this only happens when using indirect
	 * descriptors but not persistent grants
	 */
	if (!list_empty(&rinfo->indirect_pages)) {
		struct page *indirect_page, *n;

		BUG_ON(info->feature_persistent);
		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}

	/* Remove all persistent grants. */
	if (!list_empty(&rinfo->grants)) {
		list_for_each_entry_safe(persistent_gnt, n,
					 &rinfo->grants, node) {
			list_del(&persistent_gnt->node);
			if (persistent_gnt->gref != GRANT_INVALID_REF) {
				gnttab_end_foreign_access(persistent_gnt->gref,
							  0, 0UL);
				rinfo->persistent_gnts_c--;
			}
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}
	}
	BUG_ON(rinfo->persistent_gnts_c != 0);

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		/*
		 * Clear persistent grants present in requests already
		 * on the shared ring
		 */
		if (!rinfo->shadow[i].request)
			goto free_shadow;

		segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
		       rinfo->shadow[i].req.u.indirect.nr_segments :
		       rinfo->shadow[i].req.u.rw.nr_segments;
		for (j = 0; j < segs; j++) {
			persistent_gnt = rinfo->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

		if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
			/*
			 * If this is not an indirect operation don't try to
			 * free indirect segments
			 */
			goto free_shadow;

		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
			persistent_gnt = rinfo->shadow[i].indirect_grants[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

free_shadow:
		kfree(rinfo->shadow[i].grants_used);
		rinfo->shadow[i].grants_used = NULL;
		kfree(rinfo->shadow[i].indirect_grants);
		rinfo->shadow[i].indirect_grants = NULL;
		kfree(rinfo->shadow[i].sg);
		rinfo->shadow[i].sg = NULL;
	}

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&rinfo->callback);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&rinfo->work);

	/* Free resources associated with old device channel. */
	for (i = 0; i < info->nr_ring_pages; i++) {
		if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
			gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
			rinfo->ring_ref[i] = GRANT_INVALID_REF;
		}
	}
	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
	rinfo->ring.sring = NULL;

	if (rinfo->irq)
		unbind_from_irqhandler(rinfo->irq, rinfo);
	rinfo->evtchn = rinfo->irq = 0;
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
	unsigned int i;

	/* Prevent new requests being issued until we fix things up. */
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_mq_stop_hw_queues(info->rq);

	for (i = 0; i < info->nr_rings; i++)
		blkif_free_ring(&info->rinfo[i]);

	kfree(info->rinfo);
	info->rinfo = NULL;
	info->nr_rings = 0;
}
struct copy_from_grant {
	const struct blk_shadow *s;
	unsigned int grant_idx;
	unsigned int bvec_offset;
	char *bvec_data;
};

static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct copy_from_grant *info = data;
	char *shared_data;
	/* Convenient aliases */
	const struct blk_shadow *s = info->s;

	shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);

	memcpy(info->bvec_data + info->bvec_offset,
	       shared_data + offset, len);

	info->bvec_offset += len;
	info->grant_idx++;

	kunmap_atomic(shared_data);
}

static enum blk_req_status blkif_rsp_to_req_status(int rsp)
{
	switch (rsp)
	{
	case BLKIF_RSP_OKAY:
		return REQ_DONE;
	case BLKIF_RSP_EOPNOTSUPP:
		return REQ_EOPNOTSUPP;
	case BLKIF_RSP_ERROR:
		/* Fallthrough. */
	default:
		return REQ_ERROR;
	}
}

/*
 * Get the final status of the block request based on the two ring responses
 */
static int blkif_get_final_status(enum blk_req_status s1,
				  enum blk_req_status s2)
{
	BUG_ON(s1 == REQ_WAITING);
	BUG_ON(s2 == REQ_WAITING);

	if (s1 == REQ_ERROR || s2 == REQ_ERROR)
		return BLKIF_RSP_ERROR;
	else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
		return BLKIF_RSP_EOPNOTSUPP;
	return BLKIF_RSP_OKAY;
}
  1199. static bool blkif_completion(unsigned long *id,
  1200. struct blkfront_ring_info *rinfo,
  1201. struct blkif_response *bret)
  1202. {
  1203. int i = 0;
  1204. struct scatterlist *sg;
  1205. int num_sg, num_grant;
  1206. struct blkfront_info *info = rinfo->dev_info;
  1207. struct blk_shadow *s = &rinfo->shadow[*id];
  1208. struct copy_from_grant data = {
  1209. .grant_idx = 0,
  1210. };
  1211. num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
  1212. s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
  1213. /* The I/O request may be split in two. */
  1214. if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
  1215. struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
  1216. /* Keep the status of the current response in shadow. */
  1217. s->status = blkif_rsp_to_req_status(bret->status);
  1218. /* Wait the second response if not yet here. */
  1219. if (s2->status == REQ_WAITING)
  1220. return 0;
  1221. bret->status = blkif_get_final_status(s->status,
  1222. s2->status);
  1223. /*
1224. * All the grants are stored in the first shadow in order
  1225. * to make the completion code simpler.
  1226. */
  1227. num_grant += s2->req.u.rw.nr_segments;
  1228. /*
  1229. * The two responses may not come in order. Only the
  1230. * first request will store the scatter-gather list.
  1231. */
  1232. if (s2->num_sg != 0) {
  1233. /* Update "id" with the ID of the first response. */
  1234. *id = s->associated_id;
  1235. s = s2;
  1236. }
  1237. /*
1238. * We don't need the second request anymore, so recycle
1239. * it now.
  1240. */
  1241. if (add_id_to_freelist(rinfo, s->associated_id))
  1242. WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
  1243. info->gd->disk_name, s->associated_id);
  1244. }
  1245. data.s = s;
  1246. num_sg = s->num_sg;
  1247. if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
  1248. for_each_sg(s->sg, sg, num_sg, i) {
  1249. BUG_ON(sg->offset + sg->length > PAGE_SIZE);
  1250. data.bvec_offset = sg->offset;
  1251. data.bvec_data = kmap_atomic(sg_page(sg));
  1252. gnttab_foreach_grant_in_range(sg_page(sg),
  1253. sg->offset,
  1254. sg->length,
  1255. blkif_copy_from_grant,
  1256. &data);
  1257. kunmap_atomic(data.bvec_data);
  1258. }
  1259. }
  1260. /* Add the persistent grant into the list of free grants */
  1261. for (i = 0; i < num_grant; i++) {
  1262. if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
  1263. /*
  1264. * If the grant is still mapped by the backend (the
  1265. * backend has chosen to make this grant persistent)
  1266. * we add it at the head of the list, so it will be
  1267. * reused first.
  1268. */
  1269. if (!info->feature_persistent)
1270. pr_alert_ratelimited("backend has not unmapped grant: %u\n",
  1271. s->grants_used[i]->gref);
  1272. list_add(&s->grants_used[i]->node, &rinfo->grants);
  1273. rinfo->persistent_gnts_c++;
  1274. } else {
  1275. /*
  1276. * If the grant is not mapped by the backend we end the
  1277. * foreign access and add it to the tail of the list,
  1278. * so it will not be picked again unless we run out of
  1279. * persistent grants.
  1280. */
  1281. gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
  1282. s->grants_used[i]->gref = GRANT_INVALID_REF;
  1283. list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
  1284. }
  1285. }
  1286. if (s->req.operation == BLKIF_OP_INDIRECT) {
  1287. for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
  1288. if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
  1289. if (!info->feature_persistent)
1290. pr_alert_ratelimited("backend has not unmapped grant: %u\n",
  1291. s->indirect_grants[i]->gref);
  1292. list_add(&s->indirect_grants[i]->node, &rinfo->grants);
  1293. rinfo->persistent_gnts_c++;
  1294. } else {
  1295. struct page *indirect_page;
  1296. gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
  1297. /*
  1298. * Add the used indirect page back to the list of
  1299. * available pages for indirect grefs.
  1300. */
  1301. if (!info->feature_persistent) {
  1302. indirect_page = s->indirect_grants[i]->page;
  1303. list_add(&indirect_page->lru, &rinfo->indirect_pages);
  1304. }
  1305. s->indirect_grants[i]->gref = GRANT_INVALID_REF;
  1306. list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
  1307. }
  1308. }
  1309. }
  1310. return 1;
  1311. }
  1312. static irqreturn_t blkif_interrupt(int irq, void *dev_id)
  1313. {
  1314. struct request *req;
  1315. struct blkif_response *bret;
  1316. RING_IDX i, rp;
  1317. unsigned long flags;
  1318. struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
  1319. struct blkfront_info *info = rinfo->dev_info;
  1320. int error;
  1321. if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
  1322. return IRQ_HANDLED;
  1323. spin_lock_irqsave(&rinfo->ring_lock, flags);
  1324. again:
  1325. rp = rinfo->ring.sring->rsp_prod;
  1326. rmb(); /* Ensure we see queued responses up to 'rp'. */
  1327. for (i = rinfo->ring.rsp_cons; i != rp; i++) {
  1328. unsigned long id;
  1329. bret = RING_GET_RESPONSE(&rinfo->ring, i);
  1330. id = bret->id;
  1331. /*
  1332. * The backend has messed up and given us an id that we would
1333. * never have given to it (we stamp it up to BLK_RING_SIZE -
1334. * see get_id_from_freelist).
  1335. */
  1336. if (id >= BLK_RING_SIZE(info)) {
  1337. WARN(1, "%s: response to %s has incorrect id (%ld)\n",
  1338. info->gd->disk_name, op_name(bret->operation), id);
  1339. /* We can't safely get the 'struct request' as
  1340. * the id is busted. */
  1341. continue;
  1342. }
  1343. req = rinfo->shadow[id].request;
  1344. if (bret->operation != BLKIF_OP_DISCARD) {
  1345. /*
  1346. * We may need to wait for an extra response if the
1347. * I/O request is split in two.
  1348. */
  1349. if (!blkif_completion(&id, rinfo, bret))
  1350. continue;
  1351. }
  1352. if (add_id_to_freelist(rinfo, id)) {
  1353. WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
  1354. info->gd->disk_name, op_name(bret->operation), id);
  1355. continue;
  1356. }
  1357. error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
  1358. switch (bret->operation) {
  1359. case BLKIF_OP_DISCARD:
  1360. if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
  1361. struct request_queue *rq = info->rq;
  1362. printk(KERN_WARNING "blkfront: %s: %s op failed\n",
  1363. info->gd->disk_name, op_name(bret->operation));
  1364. error = -EOPNOTSUPP;
  1365. info->feature_discard = 0;
  1366. info->feature_secdiscard = 0;
  1367. queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
  1368. queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
  1369. }
  1370. blk_mq_complete_request(req, error);
  1371. break;
  1372. case BLKIF_OP_FLUSH_DISKCACHE:
  1373. case BLKIF_OP_WRITE_BARRIER:
  1374. if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
  1375. printk(KERN_WARNING "blkfront: %s: %s op failed\n",
  1376. info->gd->disk_name, op_name(bret->operation));
  1377. error = -EOPNOTSUPP;
  1378. }
  1379. if (unlikely(bret->status == BLKIF_RSP_ERROR &&
  1380. rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
  1381. printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
  1382. info->gd->disk_name, op_name(bret->operation));
  1383. error = -EOPNOTSUPP;
  1384. }
  1385. if (unlikely(error)) {
  1386. if (error == -EOPNOTSUPP)
  1387. error = 0;
  1388. info->feature_fua = 0;
  1389. info->feature_flush = 0;
  1390. xlvbd_flush(info);
  1391. }
  1392. /* fall through */
  1393. case BLKIF_OP_READ:
  1394. case BLKIF_OP_WRITE:
  1395. if (unlikely(bret->status != BLKIF_RSP_OKAY))
  1396. dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
  1397. "request: %x\n", bret->status);
  1398. blk_mq_complete_request(req, error);
  1399. break;
  1400. default:
  1401. BUG();
  1402. }
  1403. }
  1404. rinfo->ring.rsp_cons = i;
  1405. if (i != rinfo->ring.req_prod_pvt) {
  1406. int more_to_do;
  1407. RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
  1408. if (more_to_do)
  1409. goto again;
  1410. } else
  1411. rinfo->ring.sring->rsp_event = i + 1;
  1412. kick_pending_request_queues_locked(rinfo);
  1413. spin_unlock_irqrestore(&rinfo->ring_lock, flags);
  1414. return IRQ_HANDLED;
  1415. }
  1416. static int setup_blkring(struct xenbus_device *dev,
  1417. struct blkfront_ring_info *rinfo)
  1418. {
  1419. struct blkif_sring *sring;
  1420. int err, i;
  1421. struct blkfront_info *info = rinfo->dev_info;
  1422. unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
  1423. grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
  1424. for (i = 0; i < info->nr_ring_pages; i++)
  1425. rinfo->ring_ref[i] = GRANT_INVALID_REF;
  1426. sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
  1427. get_order(ring_size));
  1428. if (!sring) {
  1429. xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
  1430. return -ENOMEM;
  1431. }
  1432. SHARED_RING_INIT(sring);
  1433. FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
  1434. err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
  1435. if (err < 0) {
  1436. free_pages((unsigned long)sring, get_order(ring_size));
  1437. rinfo->ring.sring = NULL;
  1438. goto fail;
  1439. }
  1440. for (i = 0; i < info->nr_ring_pages; i++)
  1441. rinfo->ring_ref[i] = gref[i];
  1442. err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
  1443. if (err)
  1444. goto fail;
  1445. err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
  1446. "blkif", rinfo);
  1447. if (err <= 0) {
  1448. xenbus_dev_fatal(dev, err,
  1449. "bind_evtchn_to_irqhandler failed");
  1450. goto fail;
  1451. }
  1452. rinfo->irq = err;
  1453. return 0;
  1454. fail:
  1455. blkif_free(info, 0);
  1456. return err;
  1457. }
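/*
 * Sizing sketch (illustrative, assuming XEN_PAGE_SIZE == 4096): with
 * ring-page-order = 2, nr_ring_pages = 4, so setup_blkring() above allocates
 * a 16 KiB shared ring, grants it to the backend with 4 grant references
 * (gref[0..3]), and binds one event channel per ring.
 */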
  1458. /*
1459. * Write out per-ring/queue nodes including ring-ref and event-channel; each
1460. * ring buffer may span multiple pages depending on ->nr_ring_pages.
  1461. */
  1462. static int write_per_ring_nodes(struct xenbus_transaction xbt,
  1463. struct blkfront_ring_info *rinfo, const char *dir)
  1464. {
  1465. int err;
  1466. unsigned int i;
  1467. const char *message = NULL;
  1468. struct blkfront_info *info = rinfo->dev_info;
  1469. if (info->nr_ring_pages == 1) {
  1470. err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
  1471. if (err) {
  1472. message = "writing ring-ref";
  1473. goto abort_transaction;
  1474. }
  1475. } else {
  1476. for (i = 0; i < info->nr_ring_pages; i++) {
  1477. char ring_ref_name[RINGREF_NAME_LEN];
  1478. snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
  1479. err = xenbus_printf(xbt, dir, ring_ref_name,
  1480. "%u", rinfo->ring_ref[i]);
  1481. if (err) {
  1482. message = "writing ring-ref";
  1483. goto abort_transaction;
  1484. }
  1485. }
  1486. }
  1487. err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
  1488. if (err) {
  1489. message = "writing event-channel";
  1490. goto abort_transaction;
  1491. }
  1492. return 0;
  1493. abort_transaction:
  1494. xenbus_transaction_end(xbt, 1);
  1495. if (message)
  1496. xenbus_dev_fatal(info->xbdev, err, "%s", message);
  1497. return err;
  1498. }
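/*
 * Resulting xenstore layout (illustrative, not normative): for a frontend
 * negotiated to 2 queues and 2 ring pages, talk_to_blkback() below writes
 * roughly:
 *
 *   <nodename>/ring-page-order        = "1"
 *   <nodename>/multi-queue-num-queues = "2"
 *   <nodename>/queue-0/ring-ref0      = "<gref>"
 *   <nodename>/queue-0/ring-ref1      = "<gref>"
 *   <nodename>/queue-0/event-channel  = "<port>"
 *   <nodename>/queue-1/...              (same nodes)
 *   <nodename>/protocol                = XEN_IO_PROTO_ABI_NATIVE
 *   <nodename>/feature-persistent      = "1"
 *
 * With a single queue and a single page, only "ring-ref" and "event-channel"
 * are written, directly under <nodename>.
 */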
  1499. /* Common code used when first setting up, and when resuming. */
  1500. static int talk_to_blkback(struct xenbus_device *dev,
  1501. struct blkfront_info *info)
  1502. {
  1503. const char *message = NULL;
  1504. struct xenbus_transaction xbt;
  1505. int err;
  1506. unsigned int i, max_page_order;
  1507. unsigned int ring_page_order;
  1508. max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
  1509. "max-ring-page-order", 0);
  1510. ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
  1511. info->nr_ring_pages = 1 << ring_page_order;
  1512. for (i = 0; i < info->nr_rings; i++) {
  1513. struct blkfront_ring_info *rinfo = &info->rinfo[i];
  1514. /* Create shared ring, alloc event channel. */
  1515. err = setup_blkring(dev, rinfo);
  1516. if (err)
  1517. goto destroy_blkring;
  1518. }
  1519. again:
  1520. err = xenbus_transaction_start(&xbt);
  1521. if (err) {
  1522. xenbus_dev_fatal(dev, err, "starting transaction");
  1523. goto destroy_blkring;
  1524. }
  1525. if (info->nr_ring_pages > 1) {
  1526. err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
  1527. ring_page_order);
  1528. if (err) {
  1529. message = "writing ring-page-order";
  1530. goto abort_transaction;
  1531. }
  1532. }
  1533. /* We already got the number of queues/rings in _probe */
  1534. if (info->nr_rings == 1) {
  1535. err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
  1536. if (err)
  1537. goto destroy_blkring;
  1538. } else {
  1539. char *path;
  1540. size_t pathsize;
  1541. err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
  1542. info->nr_rings);
  1543. if (err) {
  1544. message = "writing multi-queue-num-queues";
  1545. goto abort_transaction;
  1546. }
  1547. pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
  1548. path = kmalloc(pathsize, GFP_KERNEL);
  1549. if (!path) {
  1550. err = -ENOMEM;
  1551. message = "ENOMEM while writing ring references";
  1552. goto abort_transaction;
  1553. }
  1554. for (i = 0; i < info->nr_rings; i++) {
  1555. memset(path, 0, pathsize);
  1556. snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
  1557. err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
  1558. if (err) {
  1559. kfree(path);
  1560. goto destroy_blkring;
  1561. }
  1562. }
  1563. kfree(path);
  1564. }
  1565. err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
  1566. XEN_IO_PROTO_ABI_NATIVE);
  1567. if (err) {
  1568. message = "writing protocol";
  1569. goto abort_transaction;
  1570. }
  1571. err = xenbus_printf(xbt, dev->nodename,
  1572. "feature-persistent", "%u", 1);
  1573. if (err)
  1574. dev_warn(&dev->dev,
  1575. "writing persistent grants feature to xenbus");
  1576. err = xenbus_transaction_end(xbt, 0);
  1577. if (err) {
  1578. if (err == -EAGAIN)
  1579. goto again;
  1580. xenbus_dev_fatal(dev, err, "completing transaction");
  1581. goto destroy_blkring;
  1582. }
  1583. for (i = 0; i < info->nr_rings; i++) {
  1584. unsigned int j;
  1585. struct blkfront_ring_info *rinfo = &info->rinfo[i];
  1586. for (j = 0; j < BLK_RING_SIZE(info); j++)
  1587. rinfo->shadow[j].req.u.rw.id = j + 1;
  1588. rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
  1589. }
  1590. xenbus_switch_state(dev, XenbusStateInitialised);
  1591. return 0;
  1592. abort_transaction:
  1593. xenbus_transaction_end(xbt, 1);
  1594. if (message)
  1595. xenbus_dev_fatal(dev, err, "%s", message);
  1596. destroy_blkring:
  1597. blkif_free(info, 0);
  1598. kfree(info);
  1599. dev_set_drvdata(&dev->dev, NULL);
  1600. return err;
  1601. }
  1602. static int negotiate_mq(struct blkfront_info *info)
  1603. {
  1604. unsigned int backend_max_queues;
  1605. unsigned int i;
  1606. BUG_ON(info->nr_rings);
  1607. /* Check if backend supports multiple queues. */
  1608. backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
  1609. "multi-queue-max-queues", 1);
  1610. info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
  1611. /* We need at least one ring. */
  1612. if (!info->nr_rings)
  1613. info->nr_rings = 1;
  1614. info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
  1615. if (!info->rinfo) {
  1616. xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
  1617. return -ENOMEM;
  1618. }
  1619. for (i = 0; i < info->nr_rings; i++) {
  1620. struct blkfront_ring_info *rinfo;
  1621. rinfo = &info->rinfo[i];
  1622. INIT_LIST_HEAD(&rinfo->indirect_pages);
  1623. INIT_LIST_HEAD(&rinfo->grants);
  1624. rinfo->dev_info = info;
  1625. INIT_WORK(&rinfo->work, blkif_restart_queue);
  1626. spin_lock_init(&rinfo->ring_lock);
  1627. }
  1628. return 0;
  1629. }
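/*
 * Example (illustrative): a backend advertising multi-queue-max-queues = 8
 * combined with xen_blkif_max_queues = 4 gives info->nr_rings = 4; if the
 * backend omits the key, the xenbus_read_unsigned() default of 1 applies,
 * and nr_rings is never allowed to drop below 1.
 */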
  1630. /**
  1631. * Entry point to this code when a new device is created. Allocate the basic
  1632. * structures and the ring buffer for communication with the backend, and
  1633. * inform the backend of the appropriate details for those. Switch to
  1634. * Initialised state.
  1635. */
  1636. static int blkfront_probe(struct xenbus_device *dev,
  1637. const struct xenbus_device_id *id)
  1638. {
  1639. int err, vdevice;
  1640. struct blkfront_info *info;
  1641. /* FIXME: Use dynamic device id if this is not set. */
  1642. err = xenbus_scanf(XBT_NIL, dev->nodename,
  1643. "virtual-device", "%i", &vdevice);
  1644. if (err != 1) {
  1645. /* go looking in the extended area instead */
  1646. err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
  1647. "%i", &vdevice);
  1648. if (err != 1) {
  1649. xenbus_dev_fatal(dev, err, "reading virtual-device");
  1650. return err;
  1651. }
  1652. }
  1653. if (xen_hvm_domain()) {
  1654. char *type;
  1655. int len;
  1656. /* no unplug has been done: do not hook devices != xen vbds */
  1657. if (xen_has_pv_and_legacy_disk_devices()) {
  1658. int major;
  1659. if (!VDEV_IS_EXTENDED(vdevice))
  1660. major = BLKIF_MAJOR(vdevice);
  1661. else
  1662. major = XENVBD_MAJOR;
  1663. if (major != XENVBD_MAJOR) {
  1664. printk(KERN_INFO
  1665. "%s: HVM does not support vbd %d as xen block device\n",
  1666. __func__, vdevice);
  1667. return -ENODEV;
  1668. }
  1669. }
  1670. /* do not create a PV cdrom device if we are an HVM guest */
  1671. type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
  1672. if (IS_ERR(type))
  1673. return -ENODEV;
  1674. if (strncmp(type, "cdrom", 5) == 0) {
  1675. kfree(type);
  1676. return -ENODEV;
  1677. }
  1678. kfree(type);
  1679. }
  1680. info = kzalloc(sizeof(*info), GFP_KERNEL);
  1681. if (!info) {
  1682. xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
  1683. return -ENOMEM;
  1684. }
  1685. info->xbdev = dev;
  1686. err = negotiate_mq(info);
  1687. if (err) {
  1688. kfree(info);
  1689. return err;
  1690. }
  1691. mutex_init(&info->mutex);
  1692. info->vdevice = vdevice;
  1693. info->connected = BLKIF_STATE_DISCONNECTED;
  1694. /* Front end dir is a number, which is used as the id. */
  1695. info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
  1696. dev_set_drvdata(&dev->dev, info);
  1697. return 0;
  1698. }
  1699. static void split_bio_end(struct bio *bio)
  1700. {
  1701. struct split_bio *split_bio = bio->bi_private;
  1702. if (atomic_dec_and_test(&split_bio->pending)) {
  1703. split_bio->bio->bi_phys_segments = 0;
  1704. split_bio->bio->bi_error = bio->bi_error;
  1705. bio_endio(split_bio->bio);
  1706. kfree(split_bio);
  1707. }
  1708. bio_put(bio);
  1709. }
  1710. static int blkif_recover(struct blkfront_info *info)
  1711. {
  1712. unsigned int i, r_index;
  1713. struct request *req, *n;
  1714. int rc;
  1715. struct bio *bio, *cloned_bio;
  1716. unsigned int segs, offset;
  1717. int pending, size;
  1718. struct split_bio *split_bio;
  1719. blkfront_gather_backend_features(info);
  1720. /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
  1721. blkif_set_queue_limits(info);
  1722. segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
  1723. blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
  1724. for (r_index = 0; r_index < info->nr_rings; r_index++) {
  1725. struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
  1726. rc = blkfront_setup_indirect(rinfo);
  1727. if (rc)
  1728. return rc;
  1729. }
  1730. xenbus_switch_state(info->xbdev, XenbusStateConnected);
  1731. /* Now safe for us to use the shared ring */
  1732. info->connected = BLKIF_STATE_CONNECTED;
  1733. for (r_index = 0; r_index < info->nr_rings; r_index++) {
  1734. struct blkfront_ring_info *rinfo;
  1735. rinfo = &info->rinfo[r_index];
  1736. /* Kick any other new requests queued since we resumed */
  1737. kick_pending_request_queues(rinfo);
  1738. }
  1739. list_for_each_entry_safe(req, n, &info->requests, queuelist) {
  1740. /* Requeue pending requests (flush or discard) */
  1741. list_del_init(&req->queuelist);
  1742. BUG_ON(req->nr_phys_segments > segs);
  1743. blk_mq_requeue_request(req, false);
  1744. }
  1745. blk_mq_start_stopped_hw_queues(info->rq, true);
  1746. blk_mq_kick_requeue_list(info->rq);
  1747. while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
  1748. /* Traverse the list of pending bios and re-queue them */
  1749. if (bio_segments(bio) > segs) {
  1750. /*
  1751. * This bio has more segments than what we can
1752. * handle, so we have to split it.
  1753. */
  1754. pending = (bio_segments(bio) + segs - 1) / segs;
  1755. split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
  1756. BUG_ON(split_bio == NULL);
  1757. atomic_set(&split_bio->pending, pending);
  1758. split_bio->bio = bio;
  1759. for (i = 0; i < pending; i++) {
  1760. offset = (i * segs * XEN_PAGE_SIZE) >> 9;
  1761. size = min((unsigned int)(segs * XEN_PAGE_SIZE) >> 9,
  1762. (unsigned int)bio_sectors(bio) - offset);
  1763. cloned_bio = bio_clone(bio, GFP_NOIO);
  1764. BUG_ON(cloned_bio == NULL);
  1765. bio_trim(cloned_bio, offset, size);
  1766. cloned_bio->bi_private = split_bio;
  1767. cloned_bio->bi_end_io = split_bio_end;
  1768. submit_bio(cloned_bio);
  1769. }
  1770. /*
  1771. * Now we have to wait for all those smaller bios to
  1772. * end, so we can also end the "parent" bio.
  1773. */
  1774. continue;
  1775. }
  1776. /* We don't need to split this bio */
  1777. submit_bio(bio);
  1778. }
  1779. return 0;
  1780. }
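/*
 * Worked example for the split path above (illustrative, assuming
 * XEN_PAGE_SIZE == 4096): with segs = 32, a pending bio with 100 segments is
 * cloned into (100 + 31) / 32 = 4 bios, each trimmed to at most
 * (32 * 4096) >> 9 = 256 sectors, at sector offsets 0, 256, 512 and 768;
 * split_bio_end() completes the original bio once all 4 clones finish.
 */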
  1781. /**
  1782. * We are reconnecting to the backend, due to a suspend/resume, or a backend
  1783. * driver restart. We tear down our blkif structure and recreate it, but
  1784. * leave the device-layer structures intact so that this is transparent to the
  1785. * rest of the kernel.
  1786. */
  1787. static int blkfront_resume(struct xenbus_device *dev)
  1788. {
  1789. struct blkfront_info *info = dev_get_drvdata(&dev->dev);
  1790. int err = 0;
  1791. unsigned int i, j;
  1792. dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
  1793. bio_list_init(&info->bio_list);
  1794. INIT_LIST_HEAD(&info->requests);
  1795. for (i = 0; i < info->nr_rings; i++) {
  1796. struct blkfront_ring_info *rinfo = &info->rinfo[i];
  1797. struct bio_list merge_bio;
  1798. struct blk_shadow *shadow = rinfo->shadow;
  1799. for (j = 0; j < BLK_RING_SIZE(info); j++) {
  1800. /* Not in use? */
  1801. if (!shadow[j].request)
  1802. continue;
  1803. /*
  1804. * Get the bios in the request so we can re-queue them.
  1805. */
1806. if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
1807. req_op(shadow[j].request) == REQ_OP_DISCARD ||
1808. req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
  1809. shadow[j].request->cmd_flags & REQ_FUA) {
  1810. /*
  1811. * Flush operations don't contain bios, so
  1812. * we need to requeue the whole request
  1813. *
  1814. * XXX: but this doesn't make any sense for a
1815. * write with the FUA flag set.
  1816. */
  1817. list_add(&shadow[j].request->queuelist, &info->requests);
  1818. continue;
  1819. }
  1820. merge_bio.head = shadow[j].request->bio;
  1821. merge_bio.tail = shadow[j].request->biotail;
  1822. bio_list_merge(&info->bio_list, &merge_bio);
  1823. shadow[j].request->bio = NULL;
  1824. blk_mq_end_request(shadow[j].request, 0);
  1825. }
  1826. }
  1827. blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
  1828. err = negotiate_mq(info);
  1829. if (err)
  1830. return err;
  1831. err = talk_to_blkback(dev, info);
  1832. if (!err)
  1833. blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
  1834. /*
  1835. * We have to wait for the backend to switch to
  1836. * connected state, since we want to read which
  1837. * features it supports.
  1838. */
  1839. return err;
  1840. }
  1841. static void blkfront_closing(struct blkfront_info *info)
  1842. {
  1843. struct xenbus_device *xbdev = info->xbdev;
  1844. struct block_device *bdev = NULL;
  1845. mutex_lock(&info->mutex);
  1846. if (xbdev->state == XenbusStateClosing) {
  1847. mutex_unlock(&info->mutex);
  1848. return;
  1849. }
  1850. if (info->gd)
  1851. bdev = bdget_disk(info->gd, 0);
  1852. mutex_unlock(&info->mutex);
  1853. if (!bdev) {
  1854. xenbus_frontend_closed(xbdev);
  1855. return;
  1856. }
  1857. mutex_lock(&bdev->bd_mutex);
  1858. if (bdev->bd_openers) {
  1859. xenbus_dev_error(xbdev, -EBUSY,
  1860. "Device in use; refusing to close");
  1861. xenbus_switch_state(xbdev, XenbusStateClosing);
  1862. } else {
  1863. xlvbd_release_gendisk(info);
  1864. xenbus_frontend_closed(xbdev);
  1865. }
  1866. mutex_unlock(&bdev->bd_mutex);
  1867. bdput(bdev);
  1868. }
  1869. static void blkfront_setup_discard(struct blkfront_info *info)
  1870. {
  1871. int err;
  1872. unsigned int discard_granularity;
  1873. unsigned int discard_alignment;
  1874. info->feature_discard = 1;
  1875. err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
  1876. "discard-granularity", "%u", &discard_granularity,
  1877. "discard-alignment", "%u", &discard_alignment,
  1878. NULL);
  1879. if (!err) {
  1880. info->discard_granularity = discard_granularity;
  1881. info->discard_alignment = discard_alignment;
  1882. }
  1883. info->feature_secdiscard =
  1884. !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
  1885. 0);
  1886. }
  1887. static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
  1888. {
  1889. unsigned int psegs, grants;
  1890. int err, i;
  1891. struct blkfront_info *info = rinfo->dev_info;
  1892. if (info->max_indirect_segments == 0) {
  1893. if (!HAS_EXTRA_REQ)
  1894. grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
  1895. else {
  1896. /*
1897. * When an extra req is required, the maximum
1898. * number of grants supported is related to the size of the
  1899. * Linux block segment.
  1900. */
  1901. grants = GRANTS_PER_PSEG;
  1902. }
  1903. }
  1904. else
  1905. grants = info->max_indirect_segments;
  1906. psegs = grants / GRANTS_PER_PSEG;
  1907. err = fill_grant_buffer(rinfo,
  1908. (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
  1909. if (err)
  1910. goto out_of_memory;
  1911. if (!info->feature_persistent && info->max_indirect_segments) {
  1912. /*
  1913. * We are using indirect descriptors but not persistent
1914. * grants, so we need to allocate a set of pages that can be
  1915. * used for mapping indirect grefs
  1916. */
  1917. int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
  1918. BUG_ON(!list_empty(&rinfo->indirect_pages));
  1919. for (i = 0; i < num; i++) {
  1920. struct page *indirect_page = alloc_page(GFP_NOIO);
  1921. if (!indirect_page)
  1922. goto out_of_memory;
  1923. list_add(&indirect_page->lru, &rinfo->indirect_pages);
  1924. }
  1925. }
  1926. for (i = 0; i < BLK_RING_SIZE(info); i++) {
  1927. rinfo->shadow[i].grants_used = kzalloc(
  1928. sizeof(rinfo->shadow[i].grants_used[0]) * grants,
  1929. GFP_NOIO);
  1930. rinfo->shadow[i].sg = kzalloc(sizeof(rinfo->shadow[i].sg[0]) * psegs, GFP_NOIO);
  1931. if (info->max_indirect_segments)
  1932. rinfo->shadow[i].indirect_grants = kzalloc(
  1933. sizeof(rinfo->shadow[i].indirect_grants[0]) *
  1934. INDIRECT_GREFS(grants),
  1935. GFP_NOIO);
  1936. if ((rinfo->shadow[i].grants_used == NULL) ||
  1937. (rinfo->shadow[i].sg == NULL) ||
  1938. (info->max_indirect_segments &&
  1939. (rinfo->shadow[i].indirect_grants == NULL)))
  1940. goto out_of_memory;
  1941. sg_init_table(rinfo->shadow[i].sg, psegs);
  1942. }
  1943. return 0;
  1944. out_of_memory:
  1945. for (i = 0; i < BLK_RING_SIZE(info); i++) {
  1946. kfree(rinfo->shadow[i].grants_used);
  1947. rinfo->shadow[i].grants_used = NULL;
  1948. kfree(rinfo->shadow[i].sg);
  1949. rinfo->shadow[i].sg = NULL;
  1950. kfree(rinfo->shadow[i].indirect_grants);
  1951. rinfo->shadow[i].indirect_grants = NULL;
  1952. }
  1953. if (!list_empty(&rinfo->indirect_pages)) {
  1954. struct page *indirect_page, *n;
  1955. list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
  1956. list_del(&indirect_page->lru);
  1957. __free_page(indirect_page);
  1958. }
  1959. }
  1960. return -ENOMEM;
  1961. }
  1962. /*
  1963. * Gather all backend feature-*
  1964. */
  1965. static void blkfront_gather_backend_features(struct blkfront_info *info)
  1966. {
  1967. unsigned int indirect_segments;
  1968. info->feature_flush = 0;
  1969. info->feature_fua = 0;
  1970. /*
  1971. * If there's no "feature-barrier" defined, then it means
  1972. * we're dealing with a very old backend which writes
  1973. * synchronously; nothing to do.
  1974. *
  1975. * If there are barriers, then we use flush.
  1976. */
  1977. if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
  1978. info->feature_flush = 1;
  1979. info->feature_fua = 1;
  1980. }
  1981. /*
1982. * And if there is "feature-flush-cache", use that in preference
1983. * to barriers.
  1984. */
  1985. if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
  1986. 0)) {
  1987. info->feature_flush = 1;
  1988. info->feature_fua = 0;
  1989. }
  1990. if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
  1991. blkfront_setup_discard(info);
  1992. info->feature_persistent =
  1993. xenbus_read_unsigned(info->xbdev->otherend,
  1994. "feature-persistent", 0);
  1995. indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
  1996. "feature-max-indirect-segments", 0);
  1997. info->max_indirect_segments = min(indirect_segments,
  1998. xen_blkif_max_segments);
  1999. }
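/*
 * Net effect of the checks above (illustrative summary):
 *   feature-barrier only          -> feature_flush = 1, feature_fua = 1
 *   feature-flush-cache present   -> feature_flush = 1, feature_fua = 0
 *   neither                       -> both 0 (backend writes synchronously)
 */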
  2000. /*
2001. * Invoked when the backend is finally 'ready' (and has produced
  2002. * the details about the physical device - #sectors, size, etc).
  2003. */
  2004. static void blkfront_connect(struct blkfront_info *info)
  2005. {
  2006. unsigned long long sectors;
  2007. unsigned long sector_size;
  2008. unsigned int physical_sector_size;
  2009. unsigned int binfo;
  2010. int err, i;
  2011. switch (info->connected) {
  2012. case BLKIF_STATE_CONNECTED:
  2013. /*
  2014. * Potentially, the back-end may be signalling
  2015. * a capacity change; update the capacity.
  2016. */
  2017. err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
  2018. "sectors", "%Lu", &sectors);
  2019. if (XENBUS_EXIST_ERR(err))
  2020. return;
  2021. printk(KERN_INFO "Setting capacity to %Lu\n",
  2022. sectors);
  2023. set_capacity(info->gd, sectors);
  2024. revalidate_disk(info->gd);
  2025. return;
  2026. case BLKIF_STATE_SUSPENDED:
  2027. /*
  2028. * If we are recovering from suspension, we need to wait
2029. * for the backend to announce its features before
  2030. * reconnecting, at least we need to know if the backend
  2031. * supports indirect descriptors, and how many.
  2032. */
  2033. blkif_recover(info);
  2034. return;
  2035. default:
  2036. break;
  2037. }
  2038. dev_dbg(&info->xbdev->dev, "%s:%s.\n",
  2039. __func__, info->xbdev->otherend);
  2040. err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
  2041. "sectors", "%llu", &sectors,
  2042. "info", "%u", &binfo,
  2043. "sector-size", "%lu", &sector_size,
  2044. NULL);
  2045. if (err) {
  2046. xenbus_dev_fatal(info->xbdev, err,
  2047. "reading backend fields at %s",
  2048. info->xbdev->otherend);
  2049. return;
  2050. }
  2051. /*
2052. * physical-sector-size is a newer field, so old backends may not
  2053. * provide this. Assume physical sector size to be the same as
  2054. * sector_size in that case.
  2055. */
  2056. physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
  2057. "physical-sector-size",
  2058. sector_size);
  2059. blkfront_gather_backend_features(info);
  2060. for (i = 0; i < info->nr_rings; i++) {
  2061. err = blkfront_setup_indirect(&info->rinfo[i]);
  2062. if (err) {
  2063. xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
  2064. info->xbdev->otherend);
  2065. blkif_free(info, 0);
  2066. break;
  2067. }
  2068. }
  2069. err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
  2070. physical_sector_size);
  2071. if (err) {
  2072. xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
  2073. info->xbdev->otherend);
  2074. goto fail;
  2075. }
  2076. xenbus_switch_state(info->xbdev, XenbusStateConnected);
  2077. /* Kick pending requests. */
  2078. info->connected = BLKIF_STATE_CONNECTED;
  2079. for (i = 0; i < info->nr_rings; i++)
  2080. kick_pending_request_queues(&info->rinfo[i]);
  2081. device_add_disk(&info->xbdev->dev, info->gd);
  2082. info->is_ready = 1;
  2083. return;
  2084. fail:
  2085. blkif_free(info, 0);
  2086. return;
  2087. }
  2088. /**
  2089. * Callback received when the backend's state changes.
  2090. */
  2091. static void blkback_changed(struct xenbus_device *dev,
  2092. enum xenbus_state backend_state)
  2093. {
  2094. struct blkfront_info *info = dev_get_drvdata(&dev->dev);
  2095. dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
  2096. switch (backend_state) {
  2097. case XenbusStateInitWait:
  2098. if (dev->state != XenbusStateInitialising)
  2099. break;
  2100. if (talk_to_blkback(dev, info))
  2101. break;
  2102. case XenbusStateInitialising:
  2103. case XenbusStateInitialised:
  2104. case XenbusStateReconfiguring:
  2105. case XenbusStateReconfigured:
  2106. case XenbusStateUnknown:
  2107. break;
  2108. case XenbusStateConnected:
  2109. /*
  2110. * talk_to_blkback sets state to XenbusStateInitialised
  2111. * and blkfront_connect sets it to XenbusStateConnected
  2112. * (if connection went OK).
  2113. *
  2114. * If the backend (or toolstack) decides to poke at backend
  2115. * state (and re-trigger the watch by setting the state repeatedly
  2116. * to XenbusStateConnected (4)) we need to deal with this.
  2117. * This is allowed as this is used to communicate to the guest
2118. * that the size of the disk has changed!
  2119. */
  2120. if ((dev->state != XenbusStateInitialised) &&
  2121. (dev->state != XenbusStateConnected)) {
  2122. if (talk_to_blkback(dev, info))
  2123. break;
  2124. }
  2125. blkfront_connect(info);
  2126. break;
  2127. case XenbusStateClosed:
  2128. if (dev->state == XenbusStateClosed)
  2129. break;
  2130. /* Missed the backend's Closing state -- fallthrough */
  2131. case XenbusStateClosing:
  2132. if (info)
  2133. blkfront_closing(info);
  2134. break;
  2135. }
  2136. }
  2137. static int blkfront_remove(struct xenbus_device *xbdev)
  2138. {
  2139. struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
  2140. struct block_device *bdev = NULL;
  2141. struct gendisk *disk;
  2142. dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
  2143. blkif_free(info, 0);
  2144. mutex_lock(&info->mutex);
  2145. disk = info->gd;
  2146. if (disk)
  2147. bdev = bdget_disk(disk, 0);
  2148. info->xbdev = NULL;
  2149. mutex_unlock(&info->mutex);
  2150. if (!bdev) {
  2151. kfree(info);
  2152. return 0;
  2153. }
  2154. /*
  2155. * The xbdev was removed before we reached the Closed
  2156. * state. See if it's safe to remove the disk. If the bdev
  2157. * isn't closed yet, we let release take care of it.
  2158. */
  2159. mutex_lock(&bdev->bd_mutex);
  2160. info = disk->private_data;
  2161. dev_warn(disk_to_dev(disk),
  2162. "%s was hot-unplugged, %d stale handles\n",
  2163. xbdev->nodename, bdev->bd_openers);
  2164. if (info && !bdev->bd_openers) {
  2165. xlvbd_release_gendisk(info);
  2166. disk->private_data = NULL;
  2167. kfree(info);
  2168. }
  2169. mutex_unlock(&bdev->bd_mutex);
  2170. bdput(bdev);
  2171. return 0;
  2172. }
  2173. static int blkfront_is_ready(struct xenbus_device *dev)
  2174. {
  2175. struct blkfront_info *info = dev_get_drvdata(&dev->dev);
  2176. return info->is_ready && info->xbdev;
  2177. }
  2178. static int blkif_open(struct block_device *bdev, fmode_t mode)
  2179. {
  2180. struct gendisk *disk = bdev->bd_disk;
  2181. struct blkfront_info *info;
  2182. int err = 0;
  2183. mutex_lock(&blkfront_mutex);
  2184. info = disk->private_data;
  2185. if (!info) {
  2186. /* xbdev gone */
  2187. err = -ERESTARTSYS;
  2188. goto out;
  2189. }
  2190. mutex_lock(&info->mutex);
  2191. if (!info->gd)
  2192. /* xbdev is closed */
  2193. err = -ERESTARTSYS;
  2194. mutex_unlock(&info->mutex);
  2195. out:
  2196. mutex_unlock(&blkfront_mutex);
  2197. return err;
  2198. }
  2199. static void blkif_release(struct gendisk *disk, fmode_t mode)
  2200. {
  2201. struct blkfront_info *info = disk->private_data;
  2202. struct block_device *bdev;
  2203. struct xenbus_device *xbdev;
  2204. mutex_lock(&blkfront_mutex);
  2205. bdev = bdget_disk(disk, 0);
  2206. if (!bdev) {
  2207. WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
  2208. goto out_mutex;
  2209. }
  2210. if (bdev->bd_openers)
  2211. goto out;
  2212. /*
  2213. * Check if we have been instructed to close. We will have
  2214. * deferred this request, because the bdev was still open.
  2215. */
  2216. mutex_lock(&info->mutex);
  2217. xbdev = info->xbdev;
  2218. if (xbdev && xbdev->state == XenbusStateClosing) {
  2219. /* pending switch to state closed */
  2220. dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
  2221. xlvbd_release_gendisk(info);
  2222. xenbus_frontend_closed(info->xbdev);
  2223. }
  2224. mutex_unlock(&info->mutex);
  2225. if (!xbdev) {
  2226. /* sudden device removal */
  2227. dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
  2228. xlvbd_release_gendisk(info);
  2229. disk->private_data = NULL;
  2230. kfree(info);
  2231. }
  2232. out:
  2233. bdput(bdev);
  2234. out_mutex:
  2235. mutex_unlock(&blkfront_mutex);
  2236. }
  2237. static const struct block_device_operations xlvbd_block_fops =
  2238. {
  2239. .owner = THIS_MODULE,
  2240. .open = blkif_open,
  2241. .release = blkif_release,
  2242. .getgeo = blkif_getgeo,
  2243. .ioctl = blkif_ioctl,
  2244. };
  2245. static const struct xenbus_device_id blkfront_ids[] = {
  2246. { "vbd" },
  2247. { "" }
  2248. };
  2249. static struct xenbus_driver blkfront_driver = {
  2250. .ids = blkfront_ids,
  2251. .probe = blkfront_probe,
  2252. .remove = blkfront_remove,
  2253. .resume = blkfront_resume,
  2254. .otherend_changed = blkback_changed,
  2255. .is_ready = blkfront_is_ready,
  2256. };
  2257. static int __init xlblk_init(void)
  2258. {
  2259. int ret;
  2260. int nr_cpus = num_online_cpus();
  2261. if (!xen_domain())
  2262. return -ENODEV;
  2263. if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
  2264. pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
  2265. xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
  2266. xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
  2267. }
  2268. if (xen_blkif_max_queues > nr_cpus) {
  2269. pr_info("Invalid max_queues (%d), will use default max: %d.\n",
  2270. xen_blkif_max_queues, nr_cpus);
  2271. xen_blkif_max_queues = nr_cpus;
  2272. }
  2273. if (!xen_has_pv_disk_devices())
  2274. return -ENODEV;
  2275. if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
  2276. printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
  2277. XENVBD_MAJOR, DEV_NAME);
  2278. return -ENODEV;
  2279. }
  2280. ret = xenbus_register_frontend(&blkfront_driver);
  2281. if (ret) {
  2282. unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
  2283. return ret;
  2284. }
  2285. return 0;
  2286. }
  2287. module_init(xlblk_init);
  2288. static void __exit xlblk_exit(void)
  2289. {
  2290. xenbus_unregister_driver(&blkfront_driver);
  2291. unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
  2292. kfree(minors);
  2293. }
  2294. module_exit(xlblk_exit);
  2295. MODULE_DESCRIPTION("Xen virtual block device frontend");
  2296. MODULE_LICENSE("GPL");
  2297. MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
  2298. MODULE_ALIAS("xen:vbd");
  2299. MODULE_ALIAS("xenblk");