ibmvmc.c 61 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * IBM Power Systems Virtual Management Channel Support.
  4. *
  5. * Copyright (c) 2004, 2018 IBM Corp.
  6. * Dave Engebretsen engebret@us.ibm.com
  7. * Steven Royer seroyer@linux.vnet.ibm.com
  8. * Adam Reznechek adreznec@linux.vnet.ibm.com
  9. * Bryant G. Ly <bryantly@linux.vnet.ibm.com>
  10. */
  11. #include <linux/module.h>
  12. #include <linux/kernel.h>
  13. #include <linux/kthread.h>
  14. #include <linux/major.h>
  15. #include <linux/string.h>
  16. #include <linux/fcntl.h>
  17. #include <linux/slab.h>
  18. #include <linux/poll.h>
  19. #include <linux/init.h>
  20. #include <linux/fs.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/percpu.h>
  24. #include <linux/delay.h>
  25. #include <linux/uaccess.h>
  26. #include <linux/io.h>
  27. #include <linux/miscdevice.h>
  28. #include <linux/sched/signal.h>
  29. #include <asm/byteorder.h>
  30. #include <asm/irq.h>
  31. #include <asm/vio.h>
  32. #include "ibmvmc.h"
#define IBMVMC_DRIVER_VERSION "1.0"

/*
 * Static global variables
 */

/* Readers sleep here; woken e.g. when a session is invalidated
 * (see ibmvmc_return_hmc).
 */
static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);

/* Driver name string. */
static const char ibmvmc_driver_name[] = "ibmvmc";

/* Global driver state. */
static struct ibmvmc_struct ibmvmc;

/* Pool of HMC connection slots, indexed by hmc_index. */
static struct ibmvmc_hmc hmcs[MAX_HMCS];

/* The single CRQ server adapter instance used by this driver. */
static struct crq_server_adapter ibmvmc_adapter;

/* Tunables, initialized to protocol defaults; presumably overridable via
 * module parameters — TODO confirm against the rest of the file.
 */
static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
static int ibmvmc_max_hmcs = DEFAULT_HMCS;
static int ibmvmc_max_mtu = DEFAULT_MTU;
  45. static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
  46. u64 dliobn, u64 dlioba)
  47. {
  48. long rc = 0;
  49. /* Ensure all writes to source memory are visible before hcall */
  50. dma_wmb();
  51. pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
  52. length, sliobn, slioba, dliobn, dlioba);
  53. rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
  54. dliobn, dlioba);
  55. pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);
  56. return rc;
  57. }
  58. static inline void h_free_crq(uint32_t unit_address)
  59. {
  60. long rc = 0;
  61. do {
  62. if (H_IS_LONG_BUSY(rc))
  63. msleep(get_longbusy_msecs(rc));
  64. rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
  65. } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
  66. }
  67. /**
  68. * h_request_vmc: - request a hypervisor virtual management channel device
  69. * @vmc_index: drc index of the vmc device created
  70. *
  71. * Requests the hypervisor create a new virtual management channel device,
  72. * allowing this partition to send hypervisor virtualization control
  73. * commands.
  74. *
  75. * Return:
  76. * 0 - Success
  77. * Non-zero - Failure
  78. */
  79. static inline long h_request_vmc(u32 *vmc_index)
  80. {
  81. long rc = 0;
  82. unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
  83. do {
  84. if (H_IS_LONG_BUSY(rc))
  85. msleep(get_longbusy_msecs(rc));
  86. /* Call to request the VMC device from phyp */
  87. rc = plpar_hcall(H_REQUEST_VMC, retbuf);
  88. pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
  89. *vmc_index = retbuf[0];
  90. } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
  91. return rc;
  92. }
  93. /* routines for managing a command/response queue */
  94. /**
  95. * ibmvmc_handle_event: - Interrupt handler for crq events
  96. * @irq: number of irq to handle, not used
  97. * @dev_instance: crq_server_adapter that received interrupt
  98. *
  99. * Disables interrupts and schedules ibmvmc_task
  100. *
  101. * Always returns IRQ_HANDLED
  102. */
  103. static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
  104. {
  105. struct crq_server_adapter *adapter =
  106. (struct crq_server_adapter *)dev_instance;
  107. vio_disable_interrupts(to_vio_dev(adapter->dev));
  108. tasklet_schedule(&adapter->work_task);
  109. return IRQ_HANDLED;
  110. }
/**
 * ibmvmc_release_crq_queue - Release CRQ Queue
 *
 * @adapter: crq_server_adapter struct
 *
 * Tears the CRQ down completely: frees the irq, kills the work tasklet,
 * stops the reset thread if it was started, closes the CRQ with the
 * hypervisor, and releases the queue page and its DMA mapping.
 * (Note: this function returns void; it has no failure path to report.)
 */
static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;

	free_irq(vdev->irq, (void *)adapter);
	tasklet_kill(&adapter->work_task);
	/* The reset thread may never have been created; only stop it if set */
	if (adapter->reset_task)
		kthread_stop(adapter->reset_task);
	h_free_crq(vdev->unit_address);
	/* Unmap before freeing the backing page */
	dma_unmap_single(adapter->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs);
}
  134. /**
  135. * ibmvmc_reset_crq_queue - Reset CRQ Queue
  136. *
  137. * @adapter: crq_server_adapter struct
  138. *
  139. * This function calls h_free_crq and then calls H_REG_CRQ and does all the
  140. * bookkeeping to get us back to where we can communicate.
  141. *
  142. * Return:
  143. * 0 - Success
  144. * Non-Zero - Failure
  145. */
  146. static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
  147. {
  148. struct vio_dev *vdev = to_vio_dev(adapter->dev);
  149. struct crq_queue *queue = &adapter->queue;
  150. int rc = 0;
  151. /* Close the CRQ */
  152. h_free_crq(vdev->unit_address);
  153. /* Clean out the queue */
  154. memset(queue->msgs, 0x00, PAGE_SIZE);
  155. queue->cur = 0;
  156. /* And re-open it again */
  157. rc = plpar_hcall_norets(H_REG_CRQ,
  158. vdev->unit_address,
  159. queue->msg_token, PAGE_SIZE);
  160. if (rc == 2)
  161. /* Adapter is good, but other end is not ready */
  162. dev_warn(adapter->dev, "Partner adapter not ready\n");
  163. else if (rc != 0)
  164. dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);
  165. return rc;
  166. }
  167. /**
  168. * crq_queue_next_crq: - Returns the next entry in message queue
  169. * @queue: crq_queue to use
  170. *
  171. * Returns pointer to next entry in queue, or NULL if there are no new
  172. * entried in the CRQ.
  173. */
  174. static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
  175. {
  176. struct ibmvmc_crq_msg *crq;
  177. unsigned long flags;
  178. spin_lock_irqsave(&queue->lock, flags);
  179. crq = &queue->msgs[queue->cur];
  180. if (crq->valid & 0x80) {
  181. if (++queue->cur == queue->size)
  182. queue->cur = 0;
  183. /* Ensure the read of the valid bit occurs before reading any
  184. * other bits of the CRQ entry
  185. */
  186. dma_rmb();
  187. } else {
  188. crq = NULL;
  189. }
  190. spin_unlock_irqrestore(&queue->lock, flags);
  191. return crq;
  192. }
  193. /**
  194. * ibmvmc_send_crq - Send CRQ
  195. *
  196. * @adapter: crq_server_adapter struct
  197. * @word1: Word1 Data field
  198. * @word2: Word2 Data field
  199. *
  200. * Return:
  201. * 0 - Success
  202. * Non-Zero - Failure
  203. */
  204. static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
  205. u64 word1, u64 word2)
  206. {
  207. struct vio_dev *vdev = to_vio_dev(adapter->dev);
  208. long rc = 0;
  209. dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
  210. vdev->unit_address, word1, word2);
  211. /*
  212. * Ensure the command buffer is flushed to memory before handing it
  213. * over to the other side to prevent it from fetching any stale data.
  214. */
  215. dma_wmb();
  216. rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
  217. dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);
  218. return rc;
  219. }
  220. /**
  221. * alloc_dma_buffer - Create DMA Buffer
  222. *
  223. * @vdev: vio_dev struct
  224. * @size: Size field
  225. * @dma_handle: DMA address field
  226. *
  227. * Allocates memory for the command queue and maps remote memory into an
  228. * ioba.
  229. *
  230. * Returns a pointer to the buffer
  231. */
  232. static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
  233. dma_addr_t *dma_handle)
  234. {
  235. /* allocate memory */
  236. void *buffer = kzalloc(size, GFP_ATOMIC);
  237. if (!buffer) {
  238. *dma_handle = 0;
  239. return NULL;
  240. }
  241. /* DMA map */
  242. *dma_handle = dma_map_single(&vdev->dev, buffer, size,
  243. DMA_BIDIRECTIONAL);
  244. if (dma_mapping_error(&vdev->dev, *dma_handle)) {
  245. *dma_handle = 0;
  246. kzfree(buffer);
  247. return NULL;
  248. }
  249. return buffer;
  250. }
/**
 * free_dma_buffer - Free DMA Buffer
 *
 * @vdev: vio_dev struct
 * @size: Size field
 * @vaddr: Address field
 * @dma_handle: DMA address field
 *
 * Releases memory for a command queue and unmaps mapped remote memory.
 * The unmap must happen before the memory is freed.
 */
static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
			    dma_addr_t dma_handle)
{
	/* DMA unmap */
	dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/* deallocate memory (zeroized first, matching alloc_dma_buffer) */
	kzfree(vaddr);
}
  269. /**
  270. * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer
  271. *
  272. * @hmc_index: HMC Index Field
  273. *
  274. * Return:
  275. * Pointer to ibmvmc_buffer
  276. */
  277. static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
  278. {
  279. struct ibmvmc_buffer *buffer;
  280. struct ibmvmc_buffer *ret_buf = NULL;
  281. unsigned long i;
  282. if (hmc_index > ibmvmc.max_hmc_index)
  283. return NULL;
  284. buffer = hmcs[hmc_index].buffer;
  285. for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
  286. if (buffer[i].valid && buffer[i].free &&
  287. buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
  288. buffer[i].free = 0;
  289. ret_buf = &buffer[i];
  290. break;
  291. }
  292. }
  293. return ret_buf;
  294. }
  295. /**
  296. * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer
  297. *
  298. * @adapter: crq_server_adapter struct
  299. * @hmc_index: Hmc Index field
  300. *
  301. * Return:
  302. * Pointer to ibmvmc_buffer
  303. */
  304. static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
  305. u8 hmc_index)
  306. {
  307. struct ibmvmc_buffer *buffer;
  308. struct ibmvmc_buffer *ret_buf = NULL;
  309. unsigned long i;
  310. if (hmc_index > ibmvmc.max_hmc_index) {
  311. dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
  312. hmc_index);
  313. return NULL;
  314. }
  315. buffer = hmcs[hmc_index].buffer;
  316. for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
  317. if (buffer[i].free &&
  318. buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
  319. buffer[i].free = 0;
  320. ret_buf = &buffer[i];
  321. break;
  322. }
  323. }
  324. return ret_buf;
  325. }
/**
 * ibmvmc_free_hmc_buffer - Free an HMC Buffer
 *
 * @hmc: ibmvmc_hmc struct
 * @buffer: ibmvmc_buffer struct
 *
 * Marks the buffer free again under the HMC lock so it can be handed out
 * by the get_*_hmc_buffer helpers.
 */
static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
				   struct ibmvmc_buffer *buffer)
{
	unsigned long flags;

	spin_lock_irqsave(&hmc->lock, flags);
	buffer->free = 1;
	spin_unlock_irqrestore(&hmc->lock, flags);
}
  341. /**
  342. * ibmvmc_count_hmc_buffers - Count HMC Buffers
  343. *
  344. * @hmc_index: HMC Index field
  345. * @valid: Valid number of buffers field
  346. * @free: Free number of buffers field
  347. *
  348. */
  349. static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
  350. unsigned int *free)
  351. {
  352. struct ibmvmc_buffer *buffer;
  353. unsigned long i;
  354. unsigned long flags;
  355. if (hmc_index > ibmvmc.max_hmc_index)
  356. return;
  357. if (!valid || !free)
  358. return;
  359. *valid = 0; *free = 0;
  360. buffer = hmcs[hmc_index].buffer;
  361. spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
  362. for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
  363. if (buffer[i].valid) {
  364. *valid = *valid + 1;
  365. if (buffer[i].free)
  366. *free = *free + 1;
  367. }
  368. }
  369. spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
  370. }
  371. /**
  372. * ibmvmc_get_free_hmc - Get Free HMC
  373. *
  374. * Return:
  375. * Pointer to an available HMC Connection
  376. * Null otherwise
  377. */
  378. static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
  379. {
  380. unsigned long i;
  381. unsigned long flags;
  382. /*
  383. * Find an available HMC connection.
  384. */
  385. for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
  386. spin_lock_irqsave(&hmcs[i].lock, flags);
  387. if (hmcs[i].state == ibmhmc_state_free) {
  388. hmcs[i].index = i;
  389. hmcs[i].state = ibmhmc_state_initial;
  390. spin_unlock_irqrestore(&hmcs[i].lock, flags);
  391. return &hmcs[i];
  392. }
  393. spin_unlock_irqrestore(&hmcs[i].lock, flags);
  394. }
  395. return NULL;
  396. }
/**
 * ibmvmc_return_hmc - Return an HMC Connection
 *
 * @hmc: ibmvmc_hmc struct
 * @release_readers: if true, invalidate the attached file session and wake
 *                   any readers blocked on ibmvmc_read_wait
 *
 * This function releases the HMC connections back into the pool: the slot
 * is reset to the free state, every valid DMA buffer is unmapped and freed,
 * and the outbound message queue entries are invalidated.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
{
	struct ibmvmc_buffer *buffer;
	struct crq_server_adapter *adapter;
	struct vio_dev *vdev;
	unsigned long i;
	unsigned long flags;

	if (!hmc || !hmc->adapter)
		return -EIO;

	if (release_readers) {
		/* Mark the session invalid, then wake sleepers so they can
		 * observe the change.
		 */
		if (hmc->file_session) {
			struct ibmvmc_file_session *session = hmc->file_session;

			session->valid = 0;
			wake_up_interruptible(&ibmvmc_read_wait);
		}
	}

	adapter = hmc->adapter;
	vdev = to_vio_dev(adapter->dev);

	spin_lock_irqsave(&hmc->lock, flags);
	/* Reset connection bookkeeping back to the free state */
	hmc->index = 0;
	hmc->state = ibmhmc_state_free;
	hmc->queue_head = 0;
	hmc->queue_tail = 0;
	buffer = hmc->buffer;
	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid) {
			/* Buffers are sized at ibmvmc.max_mtu when created */
			free_dma_buffer(vdev,
					ibmvmc.max_mtu,
					buffer[i].real_addr_local,
					buffer[i].dma_addr_local);
			dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
		}
		memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));
		hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
	}
	spin_unlock_irqrestore(&hmc->lock, flags);

	return 0;
}
  447. /**
  448. * ibmvmc_send_open - Interface Open
  449. * @buffer: Pointer to ibmvmc_buffer struct
  450. * @hmc: Pointer to ibmvmc_hmc struct
  451. *
  452. * This command is sent by the management partition as the result of a
  453. * management partition device request. It causes the hypervisor to
  454. * prepare a set of data buffers for the management application connection
  455. * indicated HMC idx. A unique HMC Idx would be used if multiple management
  456. * applications running concurrently were desired. Before responding to this
  457. * command, the hypervisor must provide the management partition with at
  458. * least one of these new buffers via the Add Buffer. This indicates whether
  459. * the messages are inbound or outbound from the hypervisor.
  460. *
  461. * Return:
  462. * 0 - Success
  463. * Non-zero - Failure
  464. */
  465. static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
  466. struct ibmvmc_hmc *hmc)
  467. {
  468. struct ibmvmc_crq_msg crq_msg;
  469. struct crq_server_adapter *adapter;
  470. __be64 *crq_as_u64 = (__be64 *)&crq_msg;
  471. int rc = 0;
  472. if (!hmc || !hmc->adapter)
  473. return -EIO;
  474. adapter = hmc->adapter;
  475. dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
  476. (unsigned long)buffer->size, (unsigned long)adapter->liobn,
  477. (unsigned long)buffer->dma_addr_local,
  478. (unsigned long)adapter->riobn,
  479. (unsigned long)buffer->dma_addr_remote);
  480. rc = h_copy_rdma(buffer->size,
  481. adapter->liobn,
  482. buffer->dma_addr_local,
  483. adapter->riobn,
  484. buffer->dma_addr_remote);
  485. if (rc) {
  486. dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
  487. rc);
  488. return -EIO;
  489. }
  490. hmc->state = ibmhmc_state_opening;
  491. crq_msg.valid = 0x80;
  492. crq_msg.type = VMC_MSG_OPEN;
  493. crq_msg.status = 0;
  494. crq_msg.var1.rsvd = 0;
  495. crq_msg.hmc_session = hmc->session;
  496. crq_msg.hmc_index = hmc->index;
  497. crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
  498. crq_msg.rsvd = 0;
  499. crq_msg.var3.rsvd = 0;
  500. ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
  501. be64_to_cpu(crq_as_u64[1]));
  502. return rc;
  503. }
  504. /**
  505. * ibmvmc_send_close - Interface Close
  506. * @hmc: Pointer to ibmvmc_hmc struct
  507. *
  508. * This command is sent by the management partition to terminate a
  509. * management application to hypervisor connection. When this command is
  510. * sent, the management partition has quiesced all I/O operations to all
  511. * buffers associated with this management application connection, and
  512. * has freed any storage for these buffers.
  513. *
  514. * Return:
  515. * 0 - Success
  516. * Non-zero - Failure
  517. */
  518. static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
  519. {
  520. struct ibmvmc_crq_msg crq_msg;
  521. struct crq_server_adapter *adapter;
  522. __be64 *crq_as_u64 = (__be64 *)&crq_msg;
  523. int rc = 0;
  524. if (!hmc || !hmc->adapter)
  525. return -EIO;
  526. adapter = hmc->adapter;
  527. dev_info(adapter->dev, "CRQ send: close\n");
  528. crq_msg.valid = 0x80;
  529. crq_msg.type = VMC_MSG_CLOSE;
  530. crq_msg.status = 0;
  531. crq_msg.var1.rsvd = 0;
  532. crq_msg.hmc_session = hmc->session;
  533. crq_msg.hmc_index = hmc->index;
  534. crq_msg.var2.rsvd = 0;
  535. crq_msg.rsvd = 0;
  536. crq_msg.var3.rsvd = 0;
  537. ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
  538. be64_to_cpu(crq_as_u64[1]));
  539. return rc;
  540. }
  541. /**
  542. * ibmvmc_send_capabilities - Send VMC Capabilities
  543. *
  544. * @adapter: crq_server_adapter struct
  545. *
  546. * The capabilities message is an administrative message sent after the CRQ
  547. * initialization sequence of messages and is used to exchange VMC capabilities
  548. * between the management partition and the hypervisor. The management
  549. * partition must send this message and the hypervisor must respond with VMC
  550. * capabilities Response message before HMC interface message can begin. Any
  551. * HMC interface messages received before the exchange of capabilities has
  552. * complete are dropped.
  553. *
  554. * Return:
  555. * 0 - Success
  556. */
  557. static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
  558. {
  559. struct ibmvmc_admin_crq_msg crq_msg;
  560. __be64 *crq_as_u64 = (__be64 *)&crq_msg;
  561. dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
  562. crq_msg.valid = 0x80;
  563. crq_msg.type = VMC_MSG_CAP;
  564. crq_msg.status = 0;
  565. crq_msg.rsvd[0] = 0;
  566. crq_msg.rsvd[1] = 0;
  567. crq_msg.max_hmc = ibmvmc_max_hmcs;
  568. crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
  569. crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
  570. crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
  571. crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);
  572. ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
  573. be64_to_cpu(crq_as_u64[1]));
  574. ibmvmc.state = ibmvmc_state_capabilities;
  575. return 0;
  576. }
  577. /**
  578. * ibmvmc_send_add_buffer_resp - Add Buffer Response
  579. *
  580. * @adapter: crq_server_adapter struct
  581. * @status: Status field
  582. * @hmc_session: HMC Session field
  583. * @hmc_index: HMC Index field
  584. * @buffer_id: Buffer Id field
  585. *
  586. * This command is sent by the management partition to the hypervisor in
  587. * response to the Add Buffer message. The Status field indicates the result of
  588. * the command.
  589. *
  590. * Return:
  591. * 0 - Success
  592. */
  593. static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
  594. u8 status, u8 hmc_session,
  595. u8 hmc_index, u16 buffer_id)
  596. {
  597. struct ibmvmc_crq_msg crq_msg;
  598. __be64 *crq_as_u64 = (__be64 *)&crq_msg;
  599. dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
  600. crq_msg.valid = 0x80;
  601. crq_msg.type = VMC_MSG_ADD_BUF_RESP;
  602. crq_msg.status = status;
  603. crq_msg.var1.rsvd = 0;
  604. crq_msg.hmc_session = hmc_session;
  605. crq_msg.hmc_index = hmc_index;
  606. crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
  607. crq_msg.rsvd = 0;
  608. crq_msg.var3.rsvd = 0;
  609. ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
  610. be64_to_cpu(crq_as_u64[1]));
  611. return 0;
  612. }
  613. /**
  614. * ibmvmc_send_rem_buffer_resp - Remove Buffer Response
  615. *
  616. * @adapter: crq_server_adapter struct
  617. * @status: Status field
  618. * @hmc_session: HMC Session field
  619. * @hmc_index: HMC Index field
  620. * @buffer_id: Buffer Id field
  621. *
  622. * This command is sent by the management partition to the hypervisor in
  623. * response to the Remove Buffer message. The Buffer ID field indicates
  624. * which buffer the management partition selected to remove. The Status
  625. * field indicates the result of the command.
  626. *
  627. * Return:
  628. * 0 - Success
  629. */
  630. static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
  631. u8 status, u8 hmc_session,
  632. u8 hmc_index, u16 buffer_id)
  633. {
  634. struct ibmvmc_crq_msg crq_msg;
  635. __be64 *crq_as_u64 = (__be64 *)&crq_msg;
  636. dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
  637. crq_msg.valid = 0x80;
  638. crq_msg.type = VMC_MSG_REM_BUF_RESP;
  639. crq_msg.status = status;
  640. crq_msg.var1.rsvd = 0;
  641. crq_msg.hmc_session = hmc_session;
  642. crq_msg.hmc_index = hmc_index;
  643. crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
  644. crq_msg.rsvd = 0;
  645. crq_msg.var3.rsvd = 0;
  646. ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
  647. be64_to_cpu(crq_as_u64[1]));
  648. return 0;
  649. }
  650. /**
  651. * ibmvmc_send_msg - Signal Message
  652. *
  653. * @adapter: crq_server_adapter struct
  654. * @buffer: ibmvmc_buffer struct
  655. * @hmc: ibmvmc_hmc struct
  656. * @msg_length: message length field
  657. *
  658. * This command is sent between the management partition and the hypervisor
  659. * in order to signal the arrival of an HMC protocol message. The command
  660. * can be sent by both the management partition and the hypervisor. It is
  661. * used for all traffic between the management application and the hypervisor,
  662. * regardless of who initiated the communication.
  663. *
  664. * There is no response to this message.
  665. *
  666. * Return:
  667. * 0 - Success
  668. * Non-zero - Failure
  669. */
  670. static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
  671. struct ibmvmc_buffer *buffer,
  672. struct ibmvmc_hmc *hmc, int msg_len)
  673. {
  674. struct ibmvmc_crq_msg crq_msg;
  675. __be64 *crq_as_u64 = (__be64 *)&crq_msg;
  676. int rc = 0;
  677. dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
  678. rc = h_copy_rdma(msg_len,
  679. adapter->liobn,
  680. buffer->dma_addr_local,
  681. adapter->riobn,
  682. buffer->dma_addr_remote);
  683. if (rc) {
  684. dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
  685. rc);
  686. return rc;
  687. }
  688. crq_msg.valid = 0x80;
  689. crq_msg.type = VMC_MSG_SIGNAL;
  690. crq_msg.status = 0;
  691. crq_msg.var1.rsvd = 0;
  692. crq_msg.hmc_session = hmc->session;
  693. crq_msg.hmc_index = hmc->index;
  694. crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
  695. crq_msg.var3.msg_len = cpu_to_be32(msg_len);
  696. dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
  697. be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));
  698. buffer->owner = VMC_BUF_OWNER_HV;
  699. ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
  700. be64_to_cpu(crq_as_u64[1]));
  701. return rc;
  702. }
  703. /**
  704. * ibmvmc_open - Open Session
  705. *
  706. * @inode: inode struct
  707. * @file: file struct
  708. *
  709. * Return:
  710. * 0 - Success
  711. */
  712. static int ibmvmc_open(struct inode *inode, struct file *file)
  713. {
  714. struct ibmvmc_file_session *session;
  715. int rc = 0;
  716. pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
  717. (unsigned long)inode, (unsigned long)file,
  718. ibmvmc.state);
  719. session = kzalloc(sizeof(*session), GFP_KERNEL);
  720. session->file = file;
  721. file->private_data = session;
  722. return rc;
  723. }
  724. /**
  725. * ibmvmc_close - Close Session
  726. *
  727. * @inode: inode struct
  728. * @file: file struct
  729. *
  730. * Return:
  731. * 0 - Success
  732. * Non-zero - Failure
  733. */
  734. static int ibmvmc_close(struct inode *inode, struct file *file)
  735. {
  736. struct ibmvmc_file_session *session;
  737. struct ibmvmc_hmc *hmc;
  738. int rc = 0;
  739. unsigned long flags;
  740. pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
  741. (unsigned long)file, ibmvmc.state);
  742. session = file->private_data;
  743. if (!session)
  744. return -EIO;
  745. hmc = session->hmc;
  746. if (hmc) {
  747. if (!hmc->adapter)
  748. return -EIO;
  749. if (ibmvmc.state == ibmvmc_state_failed) {
  750. dev_warn(hmc->adapter->dev, "close: state_failed\n");
  751. return -EIO;
  752. }
  753. spin_lock_irqsave(&hmc->lock, flags);
  754. if (hmc->state >= ibmhmc_state_opening) {
  755. rc = ibmvmc_send_close(hmc);
  756. if (rc)
  757. dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
  758. }
  759. spin_unlock_irqrestore(&hmc->lock, flags);
  760. }
  761. kzfree(session);
  762. return rc;
  763. }
  764. /**
  765. * ibmvmc_read - Read
  766. *
  767. * @file: file struct
  768. * @buf: Character buffer
  769. * @nbytes: Size in bytes
  770. * @ppos: Offset
  771. *
  772. * Return:
  773. * 0 - Success
  774. * Non-zero - Failure
  775. */
  776. static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
  777. loff_t *ppos)
  778. {
  779. struct ibmvmc_file_session *session;
  780. struct ibmvmc_hmc *hmc;
  781. struct crq_server_adapter *adapter;
  782. struct ibmvmc_buffer *buffer;
  783. ssize_t n;
  784. ssize_t retval = 0;
  785. unsigned long flags;
  786. DEFINE_WAIT(wait);
  787. pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
  788. (unsigned long)file, (unsigned long)buf,
  789. (unsigned long)nbytes);
  790. if (nbytes == 0)
  791. return 0;
  792. if (nbytes > ibmvmc.max_mtu) {
  793. pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
  794. (unsigned int)nbytes);
  795. return -EINVAL;
  796. }
  797. session = file->private_data;
  798. if (!session) {
  799. pr_warn("ibmvmc: read: no session\n");
  800. return -EIO;
  801. }
  802. hmc = session->hmc;
  803. if (!hmc) {
  804. pr_warn("ibmvmc: read: no hmc\n");
  805. return -EIO;
  806. }
  807. adapter = hmc->adapter;
  808. if (!adapter) {
  809. pr_warn("ibmvmc: read: no adapter\n");
  810. return -EIO;
  811. }
  812. do {
  813. prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);
  814. spin_lock_irqsave(&hmc->lock, flags);
  815. if (hmc->queue_tail != hmc->queue_head)
  816. /* Data is available */
  817. break;
  818. spin_unlock_irqrestore(&hmc->lock, flags);
  819. if (!session->valid) {
  820. retval = -EBADFD;
  821. goto out;
  822. }
  823. if (file->f_flags & O_NONBLOCK) {
  824. retval = -EAGAIN;
  825. goto out;
  826. }
  827. schedule();
  828. if (signal_pending(current)) {
  829. retval = -ERESTARTSYS;
  830. goto out;
  831. }
  832. } while (1);
  833. buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
  834. hmc->queue_tail++;
  835. if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
  836. hmc->queue_tail = 0;
  837. spin_unlock_irqrestore(&hmc->lock, flags);
  838. nbytes = min_t(size_t, nbytes, buffer->msg_len);
  839. n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
  840. dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
  841. ibmvmc_free_hmc_buffer(hmc, buffer);
  842. retval = nbytes;
  843. if (n) {
  844. dev_warn(adapter->dev, "read: copy to user failed.\n");
  845. retval = -EFAULT;
  846. }
  847. out:
  848. finish_wait(&ibmvmc_read_wait, &wait);
  849. dev_dbg(adapter->dev, "read: out %ld\n", retval);
  850. return retval;
  851. }
  852. /**
  853. * ibmvmc_poll - Poll
  854. *
  855. * @file: file struct
  856. * @wait: Poll Table
  857. *
  858. * Return:
  859. * poll.h return values
  860. */
  861. static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
  862. {
  863. struct ibmvmc_file_session *session;
  864. struct ibmvmc_hmc *hmc;
  865. unsigned int mask = 0;
  866. session = file->private_data;
  867. if (!session)
  868. return 0;
  869. hmc = session->hmc;
  870. if (!hmc)
  871. return 0;
  872. poll_wait(file, &ibmvmc_read_wait, wait);
  873. if (hmc->queue_head != hmc->queue_tail)
  874. mask |= POLLIN | POLLRDNORM;
  875. return mask;
  876. }
  877. /**
  878. * ibmvmc_write - Write
  879. *
  880. * @file: file struct
  881. * @buf: Character buffer
  882. * @count: Count field
  883. * @ppos: Offset
  884. *
  885. * Return:
  886. * 0 - Success
  887. * Non-zero - Failure
  888. */
  889. static ssize_t ibmvmc_write(struct file *file, const char *buffer,
  890. size_t count, loff_t *ppos)
  891. {
  892. struct ibmvmc_buffer *vmc_buffer;
  893. struct ibmvmc_file_session *session;
  894. struct crq_server_adapter *adapter;
  895. struct ibmvmc_hmc *hmc;
  896. unsigned char *buf;
  897. unsigned long flags;
  898. size_t bytes;
  899. const char *p = buffer;
  900. size_t c = count;
  901. int ret = 0;
  902. session = file->private_data;
  903. if (!session)
  904. return -EIO;
  905. hmc = session->hmc;
  906. if (!hmc)
  907. return -EIO;
  908. spin_lock_irqsave(&hmc->lock, flags);
  909. if (hmc->state == ibmhmc_state_free) {
  910. /* HMC connection is not valid (possibly was reset under us). */
  911. ret = -EIO;
  912. goto out;
  913. }
  914. adapter = hmc->adapter;
  915. if (!adapter) {
  916. ret = -EIO;
  917. goto out;
  918. }
  919. if (count > ibmvmc.max_mtu) {
  920. dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
  921. (unsigned long)count);
  922. ret = -EIO;
  923. goto out;
  924. }
  925. /* Waiting for the open resp message to the ioctl(1) - retry */
  926. if (hmc->state == ibmhmc_state_opening) {
  927. ret = -EBUSY;
  928. goto out;
  929. }
  930. /* Make sure the ioctl() was called & the open msg sent, and that
  931. * the HMC connection has not failed.
  932. */
  933. if (hmc->state != ibmhmc_state_ready) {
  934. ret = -EIO;
  935. goto out;
  936. }
  937. vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
  938. if (!vmc_buffer) {
  939. /* No buffer available for the msg send, or we have not yet
  940. * completed the open/open_resp sequence. Retry until this is
  941. * complete.
  942. */
  943. ret = -EBUSY;
  944. goto out;
  945. }
  946. if (!vmc_buffer->real_addr_local) {
  947. dev_err(adapter->dev, "no buffer storage assigned\n");
  948. ret = -EIO;
  949. goto out;
  950. }
  951. buf = vmc_buffer->real_addr_local;
  952. while (c > 0) {
  953. bytes = min_t(size_t, c, vmc_buffer->size);
  954. bytes -= copy_from_user(buf, p, bytes);
  955. if (!bytes) {
  956. ret = -EFAULT;
  957. goto out;
  958. }
  959. c -= bytes;
  960. p += bytes;
  961. }
  962. if (p == buffer)
  963. goto out;
  964. file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file));
  965. mark_inode_dirty(file->f_path.dentry->d_inode);
  966. dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
  967. (unsigned long)file, (unsigned long)count);
  968. ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
  969. ret = p - buffer;
  970. out:
  971. spin_unlock_irqrestore(&hmc->lock, flags);
  972. return (ssize_t)(ret);
  973. }
  974. /**
  975. * ibmvmc_setup_hmc - Setup the HMC
  976. *
  977. * @session: ibmvmc_file_session struct
  978. *
  979. * Return:
  980. * 0 - Success
  981. * Non-zero - Failure
  982. */
  983. static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
  984. {
  985. struct ibmvmc_hmc *hmc;
  986. unsigned int valid, free, index;
  987. if (ibmvmc.state == ibmvmc_state_failed) {
  988. pr_warn("ibmvmc: Reserve HMC: state_failed\n");
  989. return -EIO;
  990. }
  991. if (ibmvmc.state < ibmvmc_state_ready) {
  992. pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
  993. return -EAGAIN;
  994. }
  995. /* Device is busy until capabilities have been exchanged and we
  996. * have a generic buffer for each possible HMC connection.
  997. */
  998. for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
  999. valid = 0;
  1000. ibmvmc_count_hmc_buffers(index, &valid, &free);
  1001. if (valid == 0) {
  1002. pr_warn("ibmvmc: buffers not ready for index %d\n",
  1003. index);
  1004. return -ENOBUFS;
  1005. }
  1006. }
  1007. /* Get an hmc object, and transition to ibmhmc_state_initial */
  1008. hmc = ibmvmc_get_free_hmc();
  1009. if (!hmc) {
  1010. pr_warn("%s: free hmc not found\n", __func__);
  1011. return -EBUSY;
  1012. }
  1013. hmc->session = hmc->session + 1;
  1014. if (hmc->session == 0xff)
  1015. hmc->session = 1;
  1016. session->hmc = hmc;
  1017. hmc->adapter = &ibmvmc_adapter;
  1018. hmc->file_session = session;
  1019. session->valid = 1;
  1020. return 0;
  1021. }
  1022. /**
  1023. * ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID
  1024. *
  1025. * @session: ibmvmc_file_session struct
  1026. * @new_hmc_id: HMC id field
  1027. *
  1028. * IOCTL command to setup the hmc id
  1029. *
  1030. * Return:
  1031. * 0 - Success
  1032. * Non-zero - Failure
  1033. */
  1034. static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
  1035. unsigned char __user *new_hmc_id)
  1036. {
  1037. struct ibmvmc_hmc *hmc;
  1038. struct ibmvmc_buffer *buffer;
  1039. size_t bytes;
  1040. char print_buffer[HMC_ID_LEN + 1];
  1041. unsigned long flags;
  1042. long rc = 0;
  1043. /* Reserve HMC session */
  1044. hmc = session->hmc;
  1045. if (!hmc) {
  1046. rc = ibmvmc_setup_hmc(session);
  1047. if (rc)
  1048. return rc;
  1049. hmc = session->hmc;
  1050. if (!hmc) {
  1051. pr_err("ibmvmc: setup_hmc success but no hmc\n");
  1052. return -EIO;
  1053. }
  1054. }
  1055. if (hmc->state != ibmhmc_state_initial) {
  1056. pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
  1057. hmc->state);
  1058. return -EIO;
  1059. }
  1060. bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
  1061. if (bytes)
  1062. return -EFAULT;
  1063. /* Send Open Session command */
  1064. spin_lock_irqsave(&hmc->lock, flags);
  1065. buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
  1066. spin_unlock_irqrestore(&hmc->lock, flags);
  1067. if (!buffer || !buffer->real_addr_local) {
  1068. pr_warn("ibmvmc: sethmcid: no buffer available\n");
  1069. return -EIO;
  1070. }
  1071. /* Make sure buffer is NULL terminated before trying to print it */
  1072. memset(print_buffer, 0, HMC_ID_LEN + 1);
  1073. strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
  1074. pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);
  1075. memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
  1076. /* RDMA over ID, send open msg, change state to ibmhmc_state_opening */
  1077. rc = ibmvmc_send_open(buffer, hmc);
  1078. return rc;
  1079. }
  1080. /**
  1081. * ibmvmc_ioctl_query - IOCTL Query
  1082. *
  1083. * @session: ibmvmc_file_session struct
  1084. * @ret_struct: ibmvmc_query_struct
  1085. *
  1086. * Return:
  1087. * 0 - Success
  1088. * Non-zero - Failure
  1089. */
  1090. static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
  1091. struct ibmvmc_query_struct __user *ret_struct)
  1092. {
  1093. struct ibmvmc_query_struct query_struct;
  1094. size_t bytes;
  1095. memset(&query_struct, 0, sizeof(query_struct));
  1096. query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
  1097. query_struct.state = ibmvmc.state;
  1098. query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;
  1099. bytes = copy_to_user(ret_struct, &query_struct,
  1100. sizeof(query_struct));
  1101. if (bytes)
  1102. return -EFAULT;
  1103. return 0;
  1104. }
  1105. /**
  1106. * ibmvmc_ioctl_requestvmc - IOCTL Request VMC
  1107. *
  1108. * @session: ibmvmc_file_session struct
  1109. * @ret_vmc_index: VMC Index
  1110. *
  1111. * Return:
  1112. * 0 - Success
  1113. * Non-zero - Failure
  1114. */
  1115. static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
  1116. u32 __user *ret_vmc_index)
  1117. {
  1118. /* TODO: (adreznec) Add locking to control multiple process access */
  1119. size_t bytes;
  1120. long rc;
  1121. u32 vmc_drc_index;
  1122. /* Call to request the VMC device from phyp*/
  1123. rc = h_request_vmc(&vmc_drc_index);
  1124. pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);
  1125. if (rc == H_SUCCESS) {
  1126. rc = 0;
  1127. } else if (rc == H_FUNCTION) {
  1128. pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
  1129. return -EPERM;
  1130. } else if (rc == H_AUTHORITY) {
  1131. pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
  1132. return -EPERM;
  1133. } else if (rc == H_HARDWARE) {
  1134. pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
  1135. return -EIO;
  1136. } else if (rc == H_RESOURCE) {
  1137. pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
  1138. return -ENODEV;
  1139. } else if (rc == H_NOT_AVAILABLE) {
  1140. pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
  1141. return -EPERM;
  1142. } else if (rc == H_PARAMETER) {
  1143. pr_err("ibmvmc: requestvmc: invalid parameter\n");
  1144. return -EINVAL;
  1145. }
  1146. /* Success, set the vmc index in global struct */
  1147. ibmvmc.vmc_drc_index = vmc_drc_index;
  1148. bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
  1149. sizeof(*ret_vmc_index));
  1150. if (bytes) {
  1151. pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
  1152. return -EFAULT;
  1153. }
  1154. return rc;
  1155. }
  1156. /**
  1157. * ibmvmc_ioctl - IOCTL
  1158. *
  1159. * @session: ibmvmc_file_session struct
  1160. * @cmd: cmd field
  1161. * @arg: Argument field
  1162. *
  1163. * Return:
  1164. * 0 - Success
  1165. * Non-zero - Failure
  1166. */
  1167. static long ibmvmc_ioctl(struct file *file,
  1168. unsigned int cmd, unsigned long arg)
  1169. {
  1170. struct ibmvmc_file_session *session = file->private_data;
  1171. pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
  1172. (unsigned long)file, cmd, arg,
  1173. (unsigned long)session);
  1174. if (!session) {
  1175. pr_warn("ibmvmc: ioctl: no session\n");
  1176. return -EIO;
  1177. }
  1178. switch (cmd) {
  1179. case VMC_IOCTL_SETHMCID:
  1180. return ibmvmc_ioctl_sethmcid(session,
  1181. (unsigned char __user *)arg);
  1182. case VMC_IOCTL_QUERY:
  1183. return ibmvmc_ioctl_query(session,
  1184. (struct ibmvmc_query_struct __user *)arg);
  1185. case VMC_IOCTL_REQUESTVMC:
  1186. return ibmvmc_ioctl_requestvmc(session,
  1187. (unsigned int __user *)arg);
  1188. default:
  1189. pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
  1190. return -EINVAL;
  1191. }
  1192. }
/* File operations for the ibmvmc character device node */
static const struct file_operations ibmvmc_fops = {
	.owner		= THIS_MODULE,
	.read		= ibmvmc_read,
	.write		= ibmvmc_write,
	.poll		= ibmvmc_poll,
	.unlocked_ioctl	= ibmvmc_ioctl,
	.open           = ibmvmc_open,
	.release        = ibmvmc_close,
};
  1202. /**
  1203. * ibmvmc_add_buffer - Add Buffer
  1204. *
  1205. * @adapter: crq_server_adapter struct
  1206. * @crq: ibmvmc_crq_msg struct
  1207. *
  1208. * This message transfers a buffer from hypervisor ownership to management
  1209. * partition ownership. The LIOBA is obtained from the virtual TCE table
  1210. * associated with the hypervisor side of the VMC device, and points to a
  1211. * buffer of size MTU (as established in the capabilities exchange).
  1212. *
  1213. * Typical flow for ading buffers:
  1214. * 1. A new management application connection is opened by the management
  1215. * partition.
  1216. * 2. The hypervisor assigns new buffers for the traffic associated with
  1217. * that connection.
  1218. * 3. The hypervisor sends VMC Add Buffer messages to the management
  1219. * partition, informing it of the new buffers.
  1220. * 4. The hypervisor sends an HMC protocol message (to the management
  1221. * application) notifying it of the new buffers. This informs the
  1222. * application that it has buffers available for sending HMC
  1223. * commands.
  1224. *
  1225. * Return:
  1226. * 0 - Success
  1227. * Non-zero - Failure
  1228. */
  1229. static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
  1230. struct ibmvmc_crq_msg *crq)
  1231. {
  1232. struct ibmvmc_buffer *buffer;
  1233. u8 hmc_index;
  1234. u8 hmc_session;
  1235. u16 buffer_id;
  1236. unsigned long flags;
  1237. int rc = 0;
  1238. if (!crq)
  1239. return -1;
  1240. hmc_session = crq->hmc_session;
  1241. hmc_index = crq->hmc_index;
  1242. buffer_id = be16_to_cpu(crq->var2.buffer_id);
  1243. if (hmc_index > ibmvmc.max_hmc_index) {
  1244. dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
  1245. hmc_index);
  1246. ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
  1247. hmc_session, hmc_index, buffer_id);
  1248. return -1;
  1249. }
  1250. if (buffer_id >= ibmvmc.max_buffer_pool_size) {
  1251. dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
  1252. buffer_id);
  1253. ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
  1254. hmc_session, hmc_index, buffer_id);
  1255. return -1;
  1256. }
  1257. spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
  1258. buffer = &hmcs[hmc_index].buffer[buffer_id];
  1259. if (buffer->real_addr_local || buffer->dma_addr_local) {
  1260. dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
  1261. (unsigned long)buffer_id);
  1262. spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
  1263. ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
  1264. hmc_session, hmc_index, buffer_id);
  1265. return -1;
  1266. }
  1267. buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
  1268. ibmvmc.max_mtu,
  1269. &buffer->dma_addr_local);
  1270. if (!buffer->real_addr_local) {
  1271. dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
  1272. spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
  1273. ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
  1274. hmc_session, hmc_index, buffer_id);
  1275. return -1;
  1276. }
  1277. buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
  1278. buffer->size = ibmvmc.max_mtu;
  1279. buffer->owner = crq->var1.owner;
  1280. buffer->free = 1;
  1281. /* Must ensure valid==1 is observable only after all other fields are */
  1282. dma_wmb();
  1283. buffer->valid = 1;
  1284. buffer->id = buffer_id;
  1285. dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
  1286. dev_dbg(adapter->dev, " index: %d, session: %d, buffer: 0x%x, owner: %d\n",
  1287. hmc_index, hmc_session, buffer_id, buffer->owner);
  1288. dev_dbg(adapter->dev, " local: 0x%x, remote: 0x%x\n",
  1289. (u32)buffer->dma_addr_local,
  1290. (u32)buffer->dma_addr_remote);
  1291. spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
  1292. ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
  1293. hmc_index, buffer_id);
  1294. return rc;
  1295. }
  1296. /**
  1297. * ibmvmc_rem_buffer - Remove Buffer
  1298. *
  1299. * @adapter: crq_server_adapter struct
  1300. * @crq: ibmvmc_crq_msg struct
  1301. *
  1302. * This message requests an HMC buffer to be transferred from management
  1303. * partition ownership to hypervisor ownership. The management partition may
  1304. * not be able to satisfy the request at a particular point in time if all its
  1305. * buffers are in use. The management partition requires a depth of at least
  1306. * one inbound buffer to allow management application commands to flow to the
  1307. * hypervisor. It is, therefore, an interface error for the hypervisor to
  1308. * attempt to remove the management partition's last buffer.
  1309. *
  1310. * The hypervisor is expected to manage buffer usage with the management
  1311. * application directly and inform the management partition when buffers may be
  1312. * removed. The typical flow for removing buffers:
  1313. *
  1314. * 1. The management application no longer needs a communication path to a
  1315. * particular hypervisor function. That function is closed.
  1316. * 2. The hypervisor and the management application quiesce all traffic to that
  1317. * function. The hypervisor requests a reduction in buffer pool size.
  1318. * 3. The management application acknowledges the reduction in buffer pool size.
  1319. * 4. The hypervisor sends a Remove Buffer message to the management partition,
  1320. * informing it of the reduction in buffers.
  1321. * 5. The management partition verifies it can remove the buffer. This is
  1322. * possible if buffers have been quiesced.
  1323. *
  1324. * Return:
  1325. * 0 - Success
  1326. * Non-zero - Failure
  1327. */
  1328. /*
  1329. * The hypervisor requested that we pick an unused buffer, and return it.
  1330. * Before sending the buffer back, we free any storage associated with the
  1331. * buffer.
  1332. */
  1333. static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
  1334. struct ibmvmc_crq_msg *crq)
  1335. {
  1336. struct ibmvmc_buffer *buffer;
  1337. u8 hmc_index;
  1338. u8 hmc_session;
  1339. u16 buffer_id = 0;
  1340. unsigned long flags;
  1341. int rc = 0;
  1342. if (!crq)
  1343. return -1;
  1344. hmc_session = crq->hmc_session;
  1345. hmc_index = crq->hmc_index;
  1346. if (hmc_index > ibmvmc.max_hmc_index) {
  1347. dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
  1348. hmc_index);
  1349. ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
  1350. hmc_session, hmc_index, buffer_id);
  1351. return -1;
  1352. }
  1353. spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
  1354. buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
  1355. if (!buffer) {
  1356. dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
  1357. spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
  1358. ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
  1359. hmc_session, hmc_index,
  1360. VMC_INVALID_BUFFER_ID);
  1361. return -1;
  1362. }
  1363. buffer_id = buffer->id;
  1364. if (buffer->valid)
  1365. free_dma_buffer(to_vio_dev(adapter->dev),
  1366. ibmvmc.max_mtu,
  1367. buffer->real_addr_local,
  1368. buffer->dma_addr_local);
  1369. memset(buffer, 0, sizeof(struct ibmvmc_buffer));
  1370. spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
  1371. dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
  1372. ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
  1373. hmc_index, buffer_id);
  1374. return rc;
  1375. }
/**
 * ibmvmc_recv_msg - Receive a Signal message from the hypervisor
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * Validates the message's HMC/buffer indices, RDMAs the payload into the
 * local buffer, and queues the buffer for ibmvmc_read() to consume,
 * waking any reader.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_hmc *hmc;
	unsigned long msg_len;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	/* Hypervisor writes CRQs directly into our memory in big endian */
	dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
		be64_to_cpu(*((unsigned long *)crq)),
		be64_to_cpu(*(((unsigned long *)crq) + 1)));

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);
	msg_len = be32_to_cpu(crq->var3.msg_len);

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	hmc = &hmcs[hmc_index];
	spin_lock_irqsave(&hmc->lock, flags);

	if (hmc->state == ibmhmc_state_free) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
			hmc->state);
		/* HMC connection is not valid (possibly was reset under us). */
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	buffer = &hmc->buffer[buffer_id];

	/* Only a valid, hypervisor-owned buffer may carry an inbound msg */
	if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
		dev_err(adapter->dev, "Recv_msg: not valid, or not HV.  0x%x 0x%x\n",
			buffer->valid, buffer->owner);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* RDMA the data into the partition. */
	rc = h_copy_rdma(msg_len,
			 adapter->riobn,
			 buffer->dma_addr_remote,
			 adapter->liobn,
			 buffer->dma_addr_local);

	dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
		(unsigned int)msg_len, (unsigned int)buffer_id,
		(unsigned int)hmc->queue_head, (unsigned int)hmc_index);
	buffer->msg_len = msg_len;
	buffer->free = 0;
	/* Ownership returns to the partition (us) on receipt */
	buffer->owner = VMC_BUF_OWNER_ALPHA;

	if (rc) {
		dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
			rc);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* Must be locked because read operates on the same data */
	hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
	hmc->queue_head++;
	if (hmc->queue_head == ibmvmc_max_buf_pool_size)
		hmc->queue_head = 0;

	if (hmc->queue_head == hmc->queue_tail)
		dev_err(adapter->dev, "outbound buffer queue wrapped.\n");

	spin_unlock_irqrestore(&hmc->lock, flags);

	wake_up_interruptible(&ibmvmc_read_wait);

	return 0;
}
  1456. /**
  1457. * ibmvmc_process_capabilities - Process Capabilities
  1458. *
  1459. * @adapter: crq_server_adapter struct
  1460. * @crqp: ibmvmc_crq_msg struct
  1461. *
  1462. */
  1463. static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
  1464. struct ibmvmc_crq_msg *crqp)
  1465. {
  1466. struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;
  1467. if ((be16_to_cpu(crq->version) >> 8) !=
  1468. (IBMVMC_PROTOCOL_VERSION >> 8)) {
  1469. dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
  1470. be16_to_cpu(crq->version),
  1471. IBMVMC_PROTOCOL_VERSION);
  1472. ibmvmc.state = ibmvmc_state_failed;
  1473. return;
  1474. }
  1475. ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
  1476. ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
  1477. be16_to_cpu(crq->pool_size));
  1478. ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
  1479. ibmvmc.state = ibmvmc_state_ready;
  1480. dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
  1481. ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
  1482. ibmvmc.max_hmc_index);
  1483. }
  1484. /**
  1485. * ibmvmc_validate_hmc_session - Validate HMC Session
  1486. *
  1487. * @adapter: crq_server_adapter struct
  1488. * @crq: ibmvmc_crq_msg struct
  1489. *
  1490. * Return:
  1491. * 0 - Success
  1492. * Non-zero - Failure
  1493. */
  1494. static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
  1495. struct ibmvmc_crq_msg *crq)
  1496. {
  1497. unsigned char hmc_index;
  1498. hmc_index = crq->hmc_index;
  1499. if (crq->hmc_session == 0)
  1500. return 0;
  1501. if (hmc_index > ibmvmc.max_hmc_index)
  1502. return -1;
  1503. if (hmcs[hmc_index].session != crq->hmc_session) {
  1504. dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
  1505. hmcs[hmc_index].session, crq->hmc_session);
  1506. return -1;
  1507. }
  1508. return 0;
  1509. }
  1510. /**
  1511. * ibmvmc_reset - Reset
  1512. *
  1513. * @adapter: crq_server_adapter struct
  1514. * @xport_event: export_event field
  1515. *
  1516. * Closes all HMC sessions and conditionally schedules a CRQ reset.
  1517. * @xport_event: If true, the partner closed their CRQ; we don't need to reset.
  1518. * If false, we need to schedule a CRQ reset.
  1519. */
  1520. static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
  1521. {
  1522. int i;
  1523. if (ibmvmc.state != ibmvmc_state_sched_reset) {
  1524. dev_info(adapter->dev, "*** Reset to initial state.\n");
  1525. for (i = 0; i < ibmvmc_max_hmcs; i++)
  1526. ibmvmc_return_hmc(&hmcs[i], xport_event);
  1527. if (xport_event) {
  1528. /* CRQ was closed by the partner. We don't need to do
  1529. * anything except set ourself to the correct state to
  1530. * handle init msgs.
  1531. */
  1532. ibmvmc.state = ibmvmc_state_crqinit;
  1533. } else {
  1534. /* The partner did not close their CRQ - instead, we're
  1535. * closing the CRQ on our end. Need to schedule this
  1536. * for process context, because CRQ reset may require a
  1537. * sleep.
  1538. *
  1539. * Setting ibmvmc.state here immediately prevents
  1540. * ibmvmc_open from completing until the reset
  1541. * completes in process context.
  1542. */
  1543. ibmvmc.state = ibmvmc_state_sched_reset;
  1544. dev_dbg(adapter->dev, "Device reset scheduled");
  1545. wake_up_interruptible(&adapter->reset_wait_queue);
  1546. }
  1547. }
  1548. }
  1549. /**
  1550. * ibmvmc_reset_task - Reset Task
  1551. *
  1552. * @data: Data field
  1553. *
  1554. * Performs a CRQ reset of the VMC device in process context.
  1555. * NOTE: This function should not be called directly, use ibmvmc_reset.
  1556. */
  1557. static int ibmvmc_reset_task(void *data)
  1558. {
  1559. struct crq_server_adapter *adapter = data;
  1560. int rc;
  1561. set_user_nice(current, -20);
  1562. while (!kthread_should_stop()) {
  1563. wait_event_interruptible(adapter->reset_wait_queue,
  1564. (ibmvmc.state == ibmvmc_state_sched_reset) ||
  1565. kthread_should_stop());
  1566. if (kthread_should_stop())
  1567. break;
  1568. dev_dbg(adapter->dev, "CRQ resetting in process context");
  1569. tasklet_disable(&adapter->work_task);
  1570. rc = ibmvmc_reset_crq_queue(adapter);
  1571. if (rc != H_SUCCESS && rc != H_RESOURCE) {
  1572. dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
  1573. rc);
  1574. ibmvmc.state = ibmvmc_state_failed;
  1575. } else {
  1576. ibmvmc.state = ibmvmc_state_crqinit;
  1577. if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
  1578. != 0 && rc != H_RESOURCE)
  1579. dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
  1580. }
  1581. vio_enable_interrupts(to_vio_dev(adapter->dev));
  1582. tasklet_enable(&adapter->work_task);
  1583. }
  1584. return 0;
  1585. }
  1586. /**
  1587. * ibmvmc_process_open_resp - Process Open Response
  1588. *
  1589. * @crq: ibmvmc_crq_msg struct
  1590. * @adapter: crq_server_adapter struct
  1591. *
  1592. * This command is sent by the hypervisor in response to the Interface
  1593. * Open message. When this message is received, the indicated buffer is
  1594. * again available for management partition use.
  1595. */
  1596. static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
  1597. struct crq_server_adapter *adapter)
  1598. {
  1599. unsigned char hmc_index;
  1600. unsigned short buffer_id;
  1601. hmc_index = crq->hmc_index;
  1602. if (hmc_index > ibmvmc.max_hmc_index) {
  1603. /* Why would PHYP give an index > max negotiated? */
  1604. ibmvmc_reset(adapter, false);
  1605. return;
  1606. }
  1607. if (crq->status) {
  1608. dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
  1609. crq->status);
  1610. ibmvmc_return_hmc(&hmcs[hmc_index], false);
  1611. return;
  1612. }
  1613. if (hmcs[hmc_index].state == ibmhmc_state_opening) {
  1614. buffer_id = be16_to_cpu(crq->var2.buffer_id);
  1615. if (buffer_id >= ibmvmc.max_buffer_pool_size) {
  1616. dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
  1617. buffer_id);
  1618. hmcs[hmc_index].state = ibmhmc_state_failed;
  1619. } else {
  1620. ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
  1621. &hmcs[hmc_index].buffer[buffer_id]);
  1622. hmcs[hmc_index].state = ibmhmc_state_ready;
  1623. dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
  1624. }
  1625. } else {
  1626. dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
  1627. hmcs[hmc_index].state);
  1628. }
  1629. }
  1630. /**
  1631. * ibmvmc_process_close_resp - Process Close Response
  1632. *
  1633. * @crq: ibmvmc_crq_msg struct
  1634. * @adapter: crq_server_adapter struct
  1635. *
  1636. * This command is sent by the hypervisor in response to the managemant
  1637. * application Interface Close message.
  1638. *
  1639. * If the close fails, simply reset the entire driver as the state of the VMC
  1640. * must be in tough shape.
  1641. */
  1642. static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
  1643. struct crq_server_adapter *adapter)
  1644. {
  1645. unsigned char hmc_index;
  1646. hmc_index = crq->hmc_index;
  1647. if (hmc_index > ibmvmc.max_hmc_index) {
  1648. ibmvmc_reset(adapter, false);
  1649. return;
  1650. }
  1651. if (crq->status) {
  1652. dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
  1653. crq->status);
  1654. ibmvmc_reset(adapter, false);
  1655. return;
  1656. }
  1657. ibmvmc_return_hmc(&hmcs[hmc_index], false);
  1658. }
  1659. /**
  1660. * ibmvmc_crq_process - Process CRQ
  1661. *
  1662. * @adapter: crq_server_adapter struct
  1663. * @crq: ibmvmc_crq_msg struct
  1664. *
  1665. * Process the CRQ message based upon the type of message received.
  1666. *
  1667. */
  1668. static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
  1669. struct ibmvmc_crq_msg *crq)
  1670. {
  1671. switch (crq->type) {
  1672. case VMC_MSG_CAP_RESP:
  1673. dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
  1674. crq->type);
  1675. if (ibmvmc.state == ibmvmc_state_capabilities)
  1676. ibmvmc_process_capabilities(adapter, crq);
  1677. else
  1678. dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
  1679. ibmvmc.state);
  1680. break;
  1681. case VMC_MSG_OPEN_RESP:
  1682. dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
  1683. crq->type);
  1684. if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
  1685. ibmvmc_process_open_resp(crq, adapter);
  1686. break;
  1687. case VMC_MSG_ADD_BUF:
  1688. dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
  1689. crq->type);
  1690. if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
  1691. ibmvmc_add_buffer(adapter, crq);
  1692. break;
  1693. case VMC_MSG_REM_BUF:
  1694. dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
  1695. crq->type);
  1696. if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
  1697. ibmvmc_rem_buffer(adapter, crq);
  1698. break;
  1699. case VMC_MSG_SIGNAL:
  1700. dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
  1701. crq->type);
  1702. if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
  1703. ibmvmc_recv_msg(adapter, crq);
  1704. break;
  1705. case VMC_MSG_CLOSE_RESP:
  1706. dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
  1707. crq->type);
  1708. if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
  1709. ibmvmc_process_close_resp(crq, adapter);
  1710. break;
  1711. case VMC_MSG_CAP:
  1712. case VMC_MSG_OPEN:
  1713. case VMC_MSG_CLOSE:
  1714. case VMC_MSG_ADD_BUF_RESP:
  1715. case VMC_MSG_REM_BUF_RESP:
  1716. dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
  1717. crq->type);
  1718. break;
  1719. default:
  1720. dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
  1721. crq->type);
  1722. break;
  1723. }
  1724. }
  1725. /**
  1726. * ibmvmc_handle_crq_init - Handle CRQ Init
  1727. *
  1728. * @crq: ibmvmc_crq_msg struct
  1729. * @adapter: crq_server_adapter struct
  1730. *
  1731. * Handle the type of crq initialization based on whether
  1732. * it is a message or a response.
  1733. *
  1734. */
  1735. static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
  1736. struct crq_server_adapter *adapter)
  1737. {
  1738. switch (crq->type) {
  1739. case 0x01: /* Initialization message */
  1740. dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
  1741. ibmvmc.state);
  1742. if (ibmvmc.state == ibmvmc_state_crqinit) {
  1743. /* Send back a response */
  1744. if (ibmvmc_send_crq(adapter, 0xC002000000000000,
  1745. 0) == 0)
  1746. ibmvmc_send_capabilities(adapter);
  1747. else
  1748. dev_err(adapter->dev, " Unable to send init rsp\n");
  1749. } else {
  1750. dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
  1751. ibmvmc.state, ibmvmc.max_mtu);
  1752. }
  1753. break;
  1754. case 0x02: /* Initialization response */
  1755. dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
  1756. ibmvmc.state);
  1757. if (ibmvmc.state == ibmvmc_state_crqinit)
  1758. ibmvmc_send_capabilities(adapter);
  1759. break;
  1760. default:
  1761. dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
  1762. (unsigned long)crq->type);
  1763. }
  1764. }
  1765. /**
  1766. * ibmvmc_handle_crq - Handle CRQ
  1767. *
  1768. * @crq: ibmvmc_crq_msg struct
  1769. * @adapter: crq_server_adapter struct
  1770. *
  1771. * Read the command elements from the command queue and execute the
  1772. * requests based upon the type of crq message.
  1773. *
  1774. */
  1775. static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
  1776. struct crq_server_adapter *adapter)
  1777. {
  1778. switch (crq->valid) {
  1779. case 0xC0: /* initialization */
  1780. ibmvmc_handle_crq_init(crq, adapter);
  1781. break;
  1782. case 0xFF: /* Hypervisor telling us the connection is closed */
  1783. dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
  1784. ibmvmc_reset(adapter, true);
  1785. break;
  1786. case 0x80: /* real payload */
  1787. ibmvmc_crq_process(adapter, crq);
  1788. break;
  1789. default:
  1790. dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
  1791. crq->valid);
  1792. break;
  1793. }
  1794. }
/*
 * ibmvmc_task - tasklet body that drains the CRQ and dispatches messages.
 *
 * @data: the crq_server_adapter, cast to unsigned long by tasklet_init
 *
 * After draining the queue it re-enables VIO interrupts and polls once
 * more, closing the race where a message arrives between the last
 * dequeue and the interrupt enable. Bails out immediately (leaving
 * interrupts for the reset task to restore) if a CRQ reset was
 * scheduled by a handled message.
 */
static void ibmvmc_task(unsigned long data)
{
	struct crq_server_adapter *adapter =
		(struct crq_server_adapter *)data;
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct ibmvmc_crq_msg *crq;
	int done = 0;

	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
			ibmvmc_handle_crq(crq, adapter);
			/* Mark the slot free for the hypervisor to reuse. */
			crq->valid = 0x00;

			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		}

		vio_enable_interrupts(vdev);
		/* Re-check once after enabling interrupts: a message may
		 * have landed just before the enable took effect.
		 */
		crq = crq_queue_next_crq(&adapter->queue);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvmc_handle_crq(crq, adapter);
			crq->valid = 0x00;

			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		} else {
			done = 1;
		}
	}
}
  1829. /**
  1830. * ibmvmc_init_crq_queue - Init CRQ Queue
  1831. *
  1832. * @adapter: crq_server_adapter struct
  1833. *
  1834. * Return:
  1835. * 0 - Success
  1836. * Non-zero - Failure
  1837. */
  1838. static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
  1839. {
  1840. struct vio_dev *vdev = to_vio_dev(adapter->dev);
  1841. struct crq_queue *queue = &adapter->queue;
  1842. int rc = 0;
  1843. int retrc = 0;
  1844. queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);
  1845. if (!queue->msgs)
  1846. goto malloc_failed;
  1847. queue->size = PAGE_SIZE / sizeof(*queue->msgs);
  1848. queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
  1849. queue->size * sizeof(*queue->msgs),
  1850. DMA_BIDIRECTIONAL);
  1851. if (dma_mapping_error(adapter->dev, queue->msg_token))
  1852. goto map_failed;
  1853. retrc = plpar_hcall_norets(H_REG_CRQ,
  1854. vdev->unit_address,
  1855. queue->msg_token, PAGE_SIZE);
  1856. rc = retrc;
  1857. if (rc == H_RESOURCE)
  1858. rc = ibmvmc_reset_crq_queue(adapter);
  1859. if (rc == 2) {
  1860. dev_warn(adapter->dev, "Partner adapter not ready\n");
  1861. retrc = 0;
  1862. } else if (rc != 0) {
  1863. dev_err(adapter->dev, "Error %d opening adapter\n", rc);
  1864. goto reg_crq_failed;
  1865. }
  1866. queue->cur = 0;
  1867. spin_lock_init(&queue->lock);
  1868. tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);
  1869. if (request_irq(vdev->irq,
  1870. ibmvmc_handle_event,
  1871. 0, "ibmvmc", (void *)adapter) != 0) {
  1872. dev_err(adapter->dev, "couldn't register irq 0x%x\n",
  1873. vdev->irq);
  1874. goto req_irq_failed;
  1875. }
  1876. rc = vio_enable_interrupts(vdev);
  1877. if (rc != 0) {
  1878. dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc);
  1879. goto req_irq_failed;
  1880. }
  1881. return retrc;
  1882. req_irq_failed:
  1883. /* Cannot have any work since we either never got our IRQ registered,
  1884. * or never got interrupts enabled
  1885. */
  1886. tasklet_kill(&adapter->work_task);
  1887. h_free_crq(vdev->unit_address);
  1888. reg_crq_failed:
  1889. dma_unmap_single(adapter->dev,
  1890. queue->msg_token,
  1891. queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
  1892. map_failed:
  1893. free_page((unsigned long)queue->msgs);
  1894. malloc_failed:
  1895. return -ENOMEM;
  1896. }
  1897. /* Fill in the liobn and riobn fields on the adapter */
  1898. static int read_dma_window(struct vio_dev *vdev,
  1899. struct crq_server_adapter *adapter)
  1900. {
  1901. const __be32 *dma_window;
  1902. const __be32 *prop;
  1903. /* TODO Using of_parse_dma_window would be better, but it doesn't give
  1904. * a way to read multiple windows without already knowing the size of
  1905. * a window or the number of windows
  1906. */
  1907. dma_window =
  1908. (const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
  1909. NULL);
  1910. if (!dma_window) {
  1911. dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
  1912. return -1;
  1913. }
  1914. adapter->liobn = be32_to_cpu(*dma_window);
  1915. dma_window++;
  1916. prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
  1917. NULL);
  1918. if (!prop) {
  1919. dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
  1920. dma_window++;
  1921. } else {
  1922. dma_window += be32_to_cpu(*prop);
  1923. }
  1924. prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
  1925. NULL);
  1926. if (!prop) {
  1927. dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
  1928. dma_window++;
  1929. } else {
  1930. dma_window += be32_to_cpu(*prop);
  1931. }
  1932. /* dma_window should point to the second window now */
  1933. adapter->riobn = be32_to_cpu(*dma_window);
  1934. return 0;
  1935. }
/**
 * ibmvmc_probe - attach the ibm,vmc VIO device
 *
 * @vdev: the VIO device to attach
 * @id: matched entry from ibmvmc_device_table (unused here)
 *
 * Reads the DMA windows, starts the reset kthread, registers the CRQ
 * and attempts the initial handshake with the hypervisor.
 *
 * Return: 0 on success, negative value on failure.
 */
static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct crq_server_adapter *adapter = &ibmvmc_adapter;
	int rc;

	dev_set_drvdata(&vdev->dev, NULL);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev = &vdev->dev;

	dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);

	rc = read_dma_window(vdev, adapter);
	if (rc != 0) {
		ibmvmc.state = ibmvmc_state_failed;
		return -1;
	}

	dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
		adapter->liobn, adapter->riobn);

	/* The reset thread must exist before the CRQ goes live, since
	 * incoming traffic may schedule a reset at any time.
	 */
	init_waitqueue_head(&adapter->reset_wait_queue);
	adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
	if (IS_ERR(adapter->reset_task)) {
		dev_err(adapter->dev, "Failed to start reset thread\n");
		ibmvmc.state = ibmvmc_state_failed;
		rc = PTR_ERR(adapter->reset_task);
		adapter->reset_task = NULL;
		return rc;
	}

	rc = ibmvmc_init_crq_queue(adapter);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
			rc);
		ibmvmc.state = ibmvmc_state_failed;
		goto crq_failed;
	}

	ibmvmc.state = ibmvmc_state_crqinit;

	/* Try to send an initialization message. Note that this is allowed
	 * to fail if the other end is not active. In that case we just wait
	 * for the other side to initialize.
	 */
	if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
	    rc != H_RESOURCE)
		dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");

	dev_set_drvdata(&vdev->dev, adapter);

	return 0;

crq_failed:
	kthread_stop(adapter->reset_task);
	adapter->reset_task = NULL;
	return -EPERM;
}
  1982. static int ibmvmc_remove(struct vio_dev *vdev)
  1983. {
  1984. struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);
  1985. dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
  1986. vdev->unit_address);
  1987. ibmvmc_release_crq_queue(adapter);
  1988. return 0;
  1989. }
/* VIO devices this driver binds to: the "ibm,vmc" virtual adapter. */
static struct vio_device_id ibmvmc_device_table[] = {
	{ "ibm,vmc", "IBM,vmc" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);

/* VIO bus glue wiring the probe/remove entry points above. */
static struct vio_driver ibmvmc_driver = {
	.name        = ibmvmc_driver_name,
	.id_table    = ibmvmc_device_table,
	.probe       = ibmvmc_probe,
	.remove      = ibmvmc_remove,
};
  2001. static void __init ibmvmc_scrub_module_parms(void)
  2002. {
  2003. if (ibmvmc_max_mtu > MAX_MTU) {
  2004. pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
  2005. ibmvmc_max_mtu = MAX_MTU;
  2006. } else if (ibmvmc_max_mtu < MIN_MTU) {
  2007. pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
  2008. ibmvmc_max_mtu = MIN_MTU;
  2009. }
  2010. if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
  2011. pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
  2012. MAX_BUF_POOL_SIZE);
  2013. ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
  2014. } else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
  2015. pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
  2016. MIN_BUF_POOL_SIZE);
  2017. ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
  2018. }
  2019. if (ibmvmc_max_hmcs > MAX_HMCS) {
  2020. pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
  2021. ibmvmc_max_hmcs = MAX_HMCS;
  2022. } else if (ibmvmc_max_hmcs < MIN_HMCS) {
  2023. pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
  2024. ibmvmc_max_hmcs = MIN_HMCS;
  2025. }
  2026. }
/* Character device node (/dev/ibmvmc) through which the management
 * partition's application talks to this driver.
 */
static struct miscdevice ibmvmc_miscdev = {
	.name = ibmvmc_driver_name,
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &ibmvmc_fops,
};
  2032. static int __init ibmvmc_module_init(void)
  2033. {
  2034. int rc, i, j;
  2035. ibmvmc.state = ibmvmc_state_initial;
  2036. pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);
  2037. rc = misc_register(&ibmvmc_miscdev);
  2038. if (rc) {
  2039. pr_err("ibmvmc: misc registration failed\n");
  2040. goto misc_register_failed;
  2041. }
  2042. pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
  2043. ibmvmc_miscdev.minor);
  2044. /* Initialize data structures */
  2045. memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
  2046. for (i = 0; i < MAX_HMCS; i++) {
  2047. spin_lock_init(&hmcs[i].lock);
  2048. hmcs[i].state = ibmhmc_state_free;
  2049. for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
  2050. hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
  2051. }
  2052. /* Sanity check module parms */
  2053. ibmvmc_scrub_module_parms();
  2054. /*
  2055. * Initialize some reasonable values. Might be negotiated smaller
  2056. * values during the capabilities exchange.
  2057. */
  2058. ibmvmc.max_mtu = ibmvmc_max_mtu;
  2059. ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
  2060. ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;
  2061. rc = vio_register_driver(&ibmvmc_driver);
  2062. if (rc) {
  2063. pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
  2064. goto vio_reg_failed;
  2065. }
  2066. return 0;
  2067. vio_reg_failed:
  2068. misc_deregister(&ibmvmc_miscdev);
  2069. misc_register_failed:
  2070. return rc;
  2071. }
/* Module teardown: detach from the VIO bus first (stops new traffic),
 * then remove the misc device node.
 */
static void __exit ibmvmc_module_exit(void)
{
	pr_info("ibmvmc: module exit\n");
	vio_unregister_driver(&ibmvmc_driver);
	misc_deregister(&ibmvmc_miscdev);
}
module_init(ibmvmc_module_init);
module_exit(ibmvmc_module_exit);

/* Runtime-tunable limits; clamped by ibmvmc_scrub_module_parms() at load. */
module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
		   int, 0644);
MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
MODULE_PARM_DESC(max_hmcs, "Max HMCs");
module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
MODULE_PARM_DESC(max_mtu, "Max MTU");

MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM VMC");
MODULE_VERSION(IBMVMC_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");