/*
 * drivers/hv/vmbus_drv.c - Hyper-V VMBus bus driver.
 * (Web-scrape artifacts — the file-size banner and the concatenated
 * line-number gutter — removed; they were never part of the source.)
 */
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
  23. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  24. #include <linux/init.h>
  25. #include <linux/module.h>
  26. #include <linux/device.h>
  27. #include <linux/irq.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/sysctl.h>
  30. #include <linux/slab.h>
  31. #include <linux/acpi.h>
  32. #include <linux/completion.h>
  33. #include <linux/hyperv.h>
  34. #include <linux/kernel_stat.h>
  35. #include <asm/hyperv.h>
  36. #include <asm/hypervisor.h>
  37. #include <asm/mshyperv.h>
  38. #include "hyperv_vmbus.h"
/* The ACPI device that enumerates the VMBus; set by vmbus_acpi_add(). */
static struct acpi_device *hv_acpi_dev;
/* Tasklet that drains the per-cpu SynIC message page (vmbus_on_msg_dpc). */
static struct tasklet_struct msg_dpc;
/* Completed by vmbus_acpi_add() once the IRQ resource has been parsed. */
static struct completion probe_event;
/* VMBus IRQ number, filled in from the ACPI _CRS by vmbus_walk_resources(). */
static int irq;
  43. static int vmbus_exists(void)
  44. {
  45. if (hv_acpi_dev == NULL)
  46. return -ENODEV;
  47. return 0;
  48. }
/*
 * Length of a modalias GUID string: two hex characters per GUID byte.
 * (sizeof binds to the ->guid member access, not to the null pointer cast.)
 */
#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
/*
 * print_alias_name - render the device-type GUID as a flat hex string.
 *
 * @alias_name must hold at least VMBUS_ALIAS_LEN + 1 bytes; the final
 * sprintf() NUL-terminates the buffer.
 */
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;
	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
  56. static u8 channel_monitor_group(struct vmbus_channel *channel)
  57. {
  58. return (u8)channel->offermsg.monitorid / 32;
  59. }
  60. static u8 channel_monitor_offset(struct vmbus_channel *channel)
  61. {
  62. return (u8)channel->offermsg.monitorid % 32;
  63. }
  64. static u32 channel_pending(struct vmbus_channel *channel,
  65. struct hv_monitor_page *monitor_page)
  66. {
  67. u8 monitor_group = channel_monitor_group(channel);
  68. return monitor_page->trigger_group[monitor_group].pending;
  69. }
  70. static u32 channel_latency(struct vmbus_channel *channel,
  71. struct hv_monitor_page *monitor_page)
  72. {
  73. u8 monitor_group = channel_monitor_group(channel);
  74. u8 monitor_offset = channel_monitor_offset(channel);
  75. return monitor_page->latency[monitor_group][monitor_offset];
  76. }
  77. static u32 channel_conn_id(struct vmbus_channel *channel,
  78. struct hv_monitor_page *monitor_page)
  79. {
  80. u8 monitor_group = channel_monitor_group(channel);
  81. u8 monitor_offset = channel_monitor_offset(channel);
  82. return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
  83. }
  84. static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
  85. char *buf)
  86. {
  87. struct hv_device *hv_dev = device_to_hv_device(dev);
  88. if (!hv_dev->channel)
  89. return -ENODEV;
  90. return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
  91. }
  92. static DEVICE_ATTR_RO(id);
  93. static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
  94. char *buf)
  95. {
  96. struct hv_device *hv_dev = device_to_hv_device(dev);
  97. if (!hv_dev->channel)
  98. return -ENODEV;
  99. return sprintf(buf, "%d\n", hv_dev->channel->state);
  100. }
  101. static DEVICE_ATTR_RO(state);
  102. static ssize_t monitor_id_show(struct device *dev,
  103. struct device_attribute *dev_attr, char *buf)
  104. {
  105. struct hv_device *hv_dev = device_to_hv_device(dev);
  106. if (!hv_dev->channel)
  107. return -ENODEV;
  108. return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
  109. }
  110. static DEVICE_ATTR_RO(monitor_id);
  111. static ssize_t class_id_show(struct device *dev,
  112. struct device_attribute *dev_attr, char *buf)
  113. {
  114. struct hv_device *hv_dev = device_to_hv_device(dev);
  115. if (!hv_dev->channel)
  116. return -ENODEV;
  117. return sprintf(buf, "{%pUl}\n",
  118. hv_dev->channel->offermsg.offer.if_type.b);
  119. }
  120. static DEVICE_ATTR_RO(class_id);
  121. static ssize_t device_id_show(struct device *dev,
  122. struct device_attribute *dev_attr, char *buf)
  123. {
  124. struct hv_device *hv_dev = device_to_hv_device(dev);
  125. if (!hv_dev->channel)
  126. return -ENODEV;
  127. return sprintf(buf, "{%pUl}\n",
  128. hv_dev->channel->offermsg.offer.if_instance.b);
  129. }
  130. static DEVICE_ATTR_RO(device_id);
  131. static ssize_t modalias_show(struct device *dev,
  132. struct device_attribute *dev_attr, char *buf)
  133. {
  134. struct hv_device *hv_dev = device_to_hv_device(dev);
  135. char alias_name[VMBUS_ALIAS_LEN + 1];
  136. print_alias_name(hv_dev, alias_name);
  137. return sprintf(buf, "vmbus:%s\n", alias_name);
  138. }
  139. static DEVICE_ATTR_RO(modalias);
  140. static ssize_t server_monitor_pending_show(struct device *dev,
  141. struct device_attribute *dev_attr,
  142. char *buf)
  143. {
  144. struct hv_device *hv_dev = device_to_hv_device(dev);
  145. if (!hv_dev->channel)
  146. return -ENODEV;
  147. return sprintf(buf, "%d\n",
  148. channel_pending(hv_dev->channel,
  149. vmbus_connection.monitor_pages[1]));
  150. }
  151. static DEVICE_ATTR_RO(server_monitor_pending);
  152. static ssize_t client_monitor_pending_show(struct device *dev,
  153. struct device_attribute *dev_attr,
  154. char *buf)
  155. {
  156. struct hv_device *hv_dev = device_to_hv_device(dev);
  157. if (!hv_dev->channel)
  158. return -ENODEV;
  159. return sprintf(buf, "%d\n",
  160. channel_pending(hv_dev->channel,
  161. vmbus_connection.monitor_pages[1]));
  162. }
  163. static DEVICE_ATTR_RO(client_monitor_pending);
  164. static ssize_t server_monitor_latency_show(struct device *dev,
  165. struct device_attribute *dev_attr,
  166. char *buf)
  167. {
  168. struct hv_device *hv_dev = device_to_hv_device(dev);
  169. if (!hv_dev->channel)
  170. return -ENODEV;
  171. return sprintf(buf, "%d\n",
  172. channel_latency(hv_dev->channel,
  173. vmbus_connection.monitor_pages[0]));
  174. }
  175. static DEVICE_ATTR_RO(server_monitor_latency);
  176. static ssize_t client_monitor_latency_show(struct device *dev,
  177. struct device_attribute *dev_attr,
  178. char *buf)
  179. {
  180. struct hv_device *hv_dev = device_to_hv_device(dev);
  181. if (!hv_dev->channel)
  182. return -ENODEV;
  183. return sprintf(buf, "%d\n",
  184. channel_latency(hv_dev->channel,
  185. vmbus_connection.monitor_pages[1]));
  186. }
  187. static DEVICE_ATTR_RO(client_monitor_latency);
  188. static ssize_t server_monitor_conn_id_show(struct device *dev,
  189. struct device_attribute *dev_attr,
  190. char *buf)
  191. {
  192. struct hv_device *hv_dev = device_to_hv_device(dev);
  193. if (!hv_dev->channel)
  194. return -ENODEV;
  195. return sprintf(buf, "%d\n",
  196. channel_conn_id(hv_dev->channel,
  197. vmbus_connection.monitor_pages[0]));
  198. }
  199. static DEVICE_ATTR_RO(server_monitor_conn_id);
  200. static ssize_t client_monitor_conn_id_show(struct device *dev,
  201. struct device_attribute *dev_attr,
  202. char *buf)
  203. {
  204. struct hv_device *hv_dev = device_to_hv_device(dev);
  205. if (!hv_dev->channel)
  206. return -ENODEV;
  207. return sprintf(buf, "%d\n",
  208. channel_conn_id(hv_dev->channel,
  209. vmbus_connection.monitor_pages[1]));
  210. }
  211. static DEVICE_ATTR_RO(client_monitor_conn_id);
  212. static ssize_t out_intr_mask_show(struct device *dev,
  213. struct device_attribute *dev_attr, char *buf)
  214. {
  215. struct hv_device *hv_dev = device_to_hv_device(dev);
  216. struct hv_ring_buffer_debug_info outbound;
  217. if (!hv_dev->channel)
  218. return -ENODEV;
  219. hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
  220. return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
  221. }
  222. static DEVICE_ATTR_RO(out_intr_mask);
  223. static ssize_t out_read_index_show(struct device *dev,
  224. struct device_attribute *dev_attr, char *buf)
  225. {
  226. struct hv_device *hv_dev = device_to_hv_device(dev);
  227. struct hv_ring_buffer_debug_info outbound;
  228. if (!hv_dev->channel)
  229. return -ENODEV;
  230. hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
  231. return sprintf(buf, "%d\n", outbound.current_read_index);
  232. }
  233. static DEVICE_ATTR_RO(out_read_index);
  234. static ssize_t out_write_index_show(struct device *dev,
  235. struct device_attribute *dev_attr,
  236. char *buf)
  237. {
  238. struct hv_device *hv_dev = device_to_hv_device(dev);
  239. struct hv_ring_buffer_debug_info outbound;
  240. if (!hv_dev->channel)
  241. return -ENODEV;
  242. hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
  243. return sprintf(buf, "%d\n", outbound.current_write_index);
  244. }
  245. static DEVICE_ATTR_RO(out_write_index);
  246. static ssize_t out_read_bytes_avail_show(struct device *dev,
  247. struct device_attribute *dev_attr,
  248. char *buf)
  249. {
  250. struct hv_device *hv_dev = device_to_hv_device(dev);
  251. struct hv_ring_buffer_debug_info outbound;
  252. if (!hv_dev->channel)
  253. return -ENODEV;
  254. hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
  255. return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
  256. }
  257. static DEVICE_ATTR_RO(out_read_bytes_avail);
  258. static ssize_t out_write_bytes_avail_show(struct device *dev,
  259. struct device_attribute *dev_attr,
  260. char *buf)
  261. {
  262. struct hv_device *hv_dev = device_to_hv_device(dev);
  263. struct hv_ring_buffer_debug_info outbound;
  264. if (!hv_dev->channel)
  265. return -ENODEV;
  266. hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
  267. return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
  268. }
  269. static DEVICE_ATTR_RO(out_write_bytes_avail);
  270. static ssize_t in_intr_mask_show(struct device *dev,
  271. struct device_attribute *dev_attr, char *buf)
  272. {
  273. struct hv_device *hv_dev = device_to_hv_device(dev);
  274. struct hv_ring_buffer_debug_info inbound;
  275. if (!hv_dev->channel)
  276. return -ENODEV;
  277. hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  278. return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
  279. }
  280. static DEVICE_ATTR_RO(in_intr_mask);
  281. static ssize_t in_read_index_show(struct device *dev,
  282. struct device_attribute *dev_attr, char *buf)
  283. {
  284. struct hv_device *hv_dev = device_to_hv_device(dev);
  285. struct hv_ring_buffer_debug_info inbound;
  286. if (!hv_dev->channel)
  287. return -ENODEV;
  288. hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  289. return sprintf(buf, "%d\n", inbound.current_read_index);
  290. }
  291. static DEVICE_ATTR_RO(in_read_index);
  292. static ssize_t in_write_index_show(struct device *dev,
  293. struct device_attribute *dev_attr, char *buf)
  294. {
  295. struct hv_device *hv_dev = device_to_hv_device(dev);
  296. struct hv_ring_buffer_debug_info inbound;
  297. if (!hv_dev->channel)
  298. return -ENODEV;
  299. hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  300. return sprintf(buf, "%d\n", inbound.current_write_index);
  301. }
  302. static DEVICE_ATTR_RO(in_write_index);
  303. static ssize_t in_read_bytes_avail_show(struct device *dev,
  304. struct device_attribute *dev_attr,
  305. char *buf)
  306. {
  307. struct hv_device *hv_dev = device_to_hv_device(dev);
  308. struct hv_ring_buffer_debug_info inbound;
  309. if (!hv_dev->channel)
  310. return -ENODEV;
  311. hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  312. return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
  313. }
  314. static DEVICE_ATTR_RO(in_read_bytes_avail);
  315. static ssize_t in_write_bytes_avail_show(struct device *dev,
  316. struct device_attribute *dev_attr,
  317. char *buf)
  318. {
  319. struct hv_device *hv_dev = device_to_hv_device(dev);
  320. struct hv_ring_buffer_debug_info inbound;
  321. if (!hv_dev->channel)
  322. return -ENODEV;
  323. hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  324. return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
  325. }
  326. static DEVICE_ATTR_RO(in_write_bytes_avail);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	NULL,
};
/* Generates vmbus_groups, wired into hv_bus.dev_groups below. */
ATTRIBUTE_GROUPS(vmbus);
  354. /*
  355. * vmbus_uevent - add uevent for our device
  356. *
  357. * This routine is invoked when a device is added or removed on the vmbus to
  358. * generate a uevent to udev in the userspace. The udev will then look at its
  359. * rule and the uevent generated here to load the appropriate driver
  360. *
  361. * The alias string will be of the form vmbus:guid where guid is the string
  362. * representation of the device guid (each byte of the guid will be
  363. * represented with two hex characters.
  364. */
  365. static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
  366. {
  367. struct hv_device *dev = device_to_hv_device(device);
  368. int ret;
  369. char alias_name[VMBUS_ALIAS_LEN + 1];
  370. print_alias_name(dev, alias_name);
  371. ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
  372. return ret;
  373. }
  374. static uuid_le null_guid;
  375. static inline bool is_null_guid(const __u8 *guid)
  376. {
  377. if (memcmp(guid, &null_guid, sizeof(uuid_le)))
  378. return false;
  379. return true;
  380. }
  381. /*
  382. * Return a matching hv_vmbus_device_id pointer.
  383. * If there is no match, return NULL.
  384. */
  385. static const struct hv_vmbus_device_id *hv_vmbus_get_id(
  386. const struct hv_vmbus_device_id *id,
  387. __u8 *guid)
  388. {
  389. for (; !is_null_guid(id->guid); id++)
  390. if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
  391. return id;
  392. return NULL;
  393. }
  394. /*
  395. * vmbus_match - Attempt to match the specified device to the specified driver
  396. */
  397. static int vmbus_match(struct device *device, struct device_driver *driver)
  398. {
  399. struct hv_driver *drv = drv_to_hv_drv(driver);
  400. struct hv_device *hv_dev = device_to_hv_device(device);
  401. if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b))
  402. return 1;
  403. return 0;
  404. }
  405. /*
  406. * vmbus_probe - Add the new vmbus's child device
  407. */
  408. static int vmbus_probe(struct device *child_device)
  409. {
  410. int ret = 0;
  411. struct hv_driver *drv =
  412. drv_to_hv_drv(child_device->driver);
  413. struct hv_device *dev = device_to_hv_device(child_device);
  414. const struct hv_vmbus_device_id *dev_id;
  415. dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b);
  416. if (drv->probe) {
  417. ret = drv->probe(dev, dev_id);
  418. if (ret != 0)
  419. pr_err("probe failed for device %s (%d)\n",
  420. dev_name(child_device), ret);
  421. } else {
  422. pr_err("probe not set for driver %s\n",
  423. dev_name(child_device));
  424. ret = -ENODEV;
  425. }
  426. return ret;
  427. }
  428. /*
  429. * vmbus_remove - Remove a vmbus device
  430. */
  431. static int vmbus_remove(struct device *child_device)
  432. {
  433. struct hv_driver *drv = drv_to_hv_drv(child_device->driver);
  434. struct hv_device *dev = device_to_hv_device(child_device);
  435. if (drv->remove)
  436. drv->remove(dev);
  437. else
  438. pr_err("remove not set for driver %s\n",
  439. dev_name(child_device));
  440. return 0;
  441. }
  442. /*
  443. * vmbus_shutdown - Shutdown a vmbus device
  444. */
  445. static void vmbus_shutdown(struct device *child_device)
  446. {
  447. struct hv_driver *drv;
  448. struct hv_device *dev = device_to_hv_device(child_device);
  449. /* The device may not be attached yet */
  450. if (!child_device->driver)
  451. return;
  452. drv = drv_to_hv_drv(child_device->driver);
  453. if (drv->shutdown)
  454. drv->shutdown(dev);
  455. return;
  456. }
  457. /*
  458. * vmbus_device_release - Final callback release of the vmbus child device
  459. */
  460. static void vmbus_device_release(struct device *device)
  461. {
  462. struct hv_device *hv_dev = device_to_hv_device(device);
  463. kfree(hv_dev);
  464. }
/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =	vmbus_match,
	.shutdown =	vmbus_shutdown,
	.remove =	vmbus_remove,
	.probe =	vmbus_probe,
	.uevent =	vmbus_uevent,
	.dev_groups =	vmbus_groups,
};

/* Name used when requesting the VMBus IRQ. */
static const char *driver_name = "hyperv";

/*
 * Carries one SynIC message from interrupt (tasklet) context to process
 * context; the embedded hv_message is a copy of the message-page slot.
 */
struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};
  480. static void vmbus_onmessage_work(struct work_struct *work)
  481. {
  482. struct onmessage_work_context *ctx;
  483. ctx = container_of(work, struct onmessage_work_context,
  484. work);
  485. vmbus_onmessage(&ctx->msg);
  486. kfree(ctx);
  487. }
  488. static void vmbus_on_msg_dpc(unsigned long data)
  489. {
  490. int cpu = smp_processor_id();
  491. void *page_addr = hv_context.synic_message_page[cpu];
  492. struct hv_message *msg = (struct hv_message *)page_addr +
  493. VMBUS_MESSAGE_SINT;
  494. struct onmessage_work_context *ctx;
  495. while (1) {
  496. if (msg->header.message_type == HVMSG_NONE) {
  497. /* no msg */
  498. break;
  499. } else {
  500. ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
  501. if (ctx == NULL)
  502. continue;
  503. INIT_WORK(&ctx->work, vmbus_onmessage_work);
  504. memcpy(&ctx->msg, msg, sizeof(*msg));
  505. queue_work(vmbus_connection.work_queue, &ctx->work);
  506. }
  507. msg->header.message_type = HVMSG_NONE;
  508. /*
  509. * Make sure the write to MessageType (ie set to
  510. * HVMSG_NONE) happens before we read the
  511. * MessagePending and EOMing. Otherwise, the EOMing
  512. * will not deliver any more messages since there is
  513. * no empty slot
  514. */
  515. mb();
  516. if (msg->header.message_flags.msg_pending) {
  517. /*
  518. * This will cause message queue rescan to
  519. * possibly deliver another msg from the
  520. * hypervisor
  521. */
  522. wrmsrl(HV_X64_MSR_EOM, 0);
  523. }
  524. }
  525. }
/*
 * vmbus_isr - top-half handler for the VMBus interrupt.
 *
 * Checks the per-cpu SynIC event page and message page and schedules the
 * matching tasklets.  Returns IRQ_HANDLED if either source had work,
 * IRQ_NONE otherwise.  Runs concurrently on multiple CPUs (see
 * vmbus_flow_handler below).
 */
static irqreturn_t vmbus_isr(int irq, void *dev_id)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	/* SynIC not initialized on this CPU yet; nothing can be pending. */
	if (page_addr == NULL)
		return IRQ_NONE;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
		(vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);


	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		handled = true;
		/* Drained in vmbus_on_msg_dpc(). */
		tasklet_schedule(&msg_dpc);
	}

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
/*
 * vmbus interrupt flow handler:
 * vmbus interrupts can concurrently occur on multiple CPUs and
 * can be handled concurrently.
 *
 * Unlike the default flow handlers, this one takes no per-descriptor lock
 * and does no masking/acking, so each CPU invokes the action directly.
 * Assumes exactly one irqaction is registered (done in vmbus_bus_init()).
 */
static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
{
	kstat_incr_irqs_this_cpu(irq, desc);

	desc->action->handler(irq, desc->action->dev_id);
}
/*
 * vmbus_bus_init -Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- get the irq resource
 *	- retrieve the channel offers
 *
 * Teardown on failure unwinds in strict reverse order via the err_* labels.
 */
static int vmbus_bus_init(int irq)
{
	int ret;

	/* Hypervisor initialization...setup hypercall page..etc */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);

	ret = bus_register(&hv_bus);
	if (ret)
		goto err_cleanup;

	/* hv_acpi_dev as dev_id so free_irq() can match it on unwind. */
	ret = request_irq(irq, vmbus_isr, 0, driver_name, hv_acpi_dev);

	if (ret != 0) {
		pr_err("Unable to request IRQ %d\n",
			   irq);
		goto err_unregister;
	}

	/*
	 * Vmbus interrupts can be handled concurrently on
	 * different CPUs. Establish an appropriate interrupt flow
	 * handler that can support this model.
	 */
	irq_set_handler(irq, vmbus_flow_handler);

	/*
	 * Register our interrupt handler.
	 */
	hv_register_vmbus_handler(irq, vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	on_each_cpu(hv_synic_init, NULL, 1);
	ret = vmbus_connect();
	if (ret)
		goto err_alloc;

	/* Ask the host to send channel offers; devices appear from here on. */
	vmbus_request_offers();

	return 0;

err_alloc:
	hv_synic_free();
	free_irq(irq, hv_acpi_dev);

err_unregister:
	bus_unregister(&hv_bus);

err_cleanup:
	hv_cleanup();

	return ret;
}
  643. /**
  644. * __vmbus_child_driver_register - Register a vmbus's driver
  645. * @drv: Pointer to driver structure you want to register
  646. * @owner: owner module of the drv
  647. * @mod_name: module name string
  648. *
  649. * Registers the given driver with Linux through the 'driver_register()' call
  650. * and sets up the hyper-v vmbus handling for this driver.
  651. * It will return the state of the 'driver_register()' call.
  652. *
  653. */
  654. int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
  655. {
  656. int ret;
  657. pr_info("registering driver %s\n", hv_driver->name);
  658. ret = vmbus_exists();
  659. if (ret < 0)
  660. return ret;
  661. hv_driver->driver.name = hv_driver->name;
  662. hv_driver->driver.owner = owner;
  663. hv_driver->driver.mod_name = mod_name;
  664. hv_driver->driver.bus = &hv_bus;
  665. ret = driver_register(&hv_driver->driver);
  666. return ret;
  667. }
  668. EXPORT_SYMBOL_GPL(__vmbus_driver_register);
  669. /**
  670. * vmbus_driver_unregister() - Unregister a vmbus's driver
  671. * @drv: Pointer to driver structure you want to un-register
  672. *
  673. * Un-register the given driver that was previous registered with a call to
  674. * vmbus_driver_register()
  675. */
  676. void vmbus_driver_unregister(struct hv_driver *hv_driver)
  677. {
  678. pr_info("unregistering driver %s\n", hv_driver->name);
  679. if (!vmbus_exists())
  680. driver_unregister(&hv_driver->driver);
  681. }
  682. EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
  683. /*
  684. * vmbus_device_create - Creates and registers a new child device
  685. * on the vmbus.
  686. */
  687. struct hv_device *vmbus_device_create(uuid_le *type,
  688. uuid_le *instance,
  689. struct vmbus_channel *channel)
  690. {
  691. struct hv_device *child_device_obj;
  692. child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
  693. if (!child_device_obj) {
  694. pr_err("Unable to allocate device object for child device\n");
  695. return NULL;
  696. }
  697. child_device_obj->channel = channel;
  698. memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
  699. memcpy(&child_device_obj->dev_instance, instance,
  700. sizeof(uuid_le));
  701. return child_device_obj;
  702. }
  703. /*
  704. * vmbus_device_register - Register the child device
  705. */
  706. int vmbus_device_register(struct hv_device *child_device_obj)
  707. {
  708. int ret = 0;
  709. static atomic_t device_num = ATOMIC_INIT(0);
  710. dev_set_name(&child_device_obj->device, "vmbus_0_%d",
  711. atomic_inc_return(&device_num));
  712. child_device_obj->device.bus = &hv_bus;
  713. child_device_obj->device.parent = &hv_acpi_dev->dev;
  714. child_device_obj->device.release = vmbus_device_release;
  715. /*
  716. * Register with the LDM. This will kick off the driver/device
  717. * binding...which will eventually call vmbus_match() and vmbus_probe()
  718. */
  719. ret = device_register(&child_device_obj->device);
  720. if (ret)
  721. pr_err("Unable to register child device\n");
  722. else
  723. pr_debug("child device %s registered\n",
  724. dev_name(&child_device_obj->device));
  725. return ret;
  726. }
  727. /*
  728. * vmbus_device_unregister - Remove the specified child device
  729. * from the vmbus.
  730. */
  731. void vmbus_device_unregister(struct hv_device *device_obj)
  732. {
  733. pr_debug("child device %s unregistered\n",
  734. dev_name(&device_obj->device));
  735. /*
  736. * Kick off the process of unregistering the device.
  737. * This will call vmbus_remove() and eventually vmbus_device_release()
  738. */
  739. device_unregister(&device_obj->device);
  740. }
  741. /*
  742. * VMBUS is an acpi enumerated device. Get the the IRQ information
  743. * from DSDT.
  744. */
  745. static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
  746. {
  747. if (res->type == ACPI_RESOURCE_TYPE_IRQ) {
  748. struct acpi_resource_irq *irqp;
  749. irqp = &res->data.irq;
  750. *((unsigned int *)irq) = irqp->interrupts[0];
  751. }
  752. return AE_OK;
  753. }
  754. static int vmbus_acpi_add(struct acpi_device *device)
  755. {
  756. acpi_status result;
  757. hv_acpi_dev = device;
  758. result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
  759. vmbus_walk_resources, &irq);
  760. if (ACPI_FAILURE(result)) {
  761. complete(&probe_event);
  762. return -ENODEV;
  763. }
  764. complete(&probe_event);
  765. return 0;
  766. }
/* ACPI IDs the hypervisor uses to expose the VMBus (casing varies by host). */
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

/* ACPI driver whose .add discovers the IRQ; registered in hv_acpi_init(). */
static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
	},
};
/*
 * Module entry: confirm we run on Hyper-V, discover the VMBus IRQ via
 * ACPI, then bring the whole bus up with vmbus_bus_init().
 */
static int __init hv_acpi_init(void)
{
	int ret, t;

	/* Bail early when not running as a Hyper-V guest. */
	if (x86_hyper != &x86_hyper_ms_hyperv)
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get irq resources first.
	 */

	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	/* Wait (up to 5s) for vmbus_acpi_add() to signal IRQ discovery. */
	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	/* irq stays 0 if _CRS held no IRQ resource — treat as no device. */
	if (irq <= 0) {
		ret = -ENODEV;
		goto cleanup;
	}

	ret = vmbus_bus_init(irq);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}
/*
 * Module exit: tear down in reverse of vmbus_bus_init()/hv_acpi_init() —
 * release the IRQ, remove child devices, unregister the bus, undo the
 * hypervisor setup, and finally drop the ACPI driver.
 */
static void __exit vmbus_exit(void)
{
	free_irq(irq, hv_acpi_dev);
	vmbus_free_channels();
	bus_unregister(&hv_bus);
	hv_cleanup();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}


MODULE_LICENSE("GPL");

/* subsys_initcall: VMBus must be up before the drivers that ride on it. */
subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);