vmbus_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
static struct acpi_device *hv_acpi_dev;

static struct tasklet_struct msg_dpc;
static struct completion probe_event;
static int irq;

static int vmbus_exists(void)
{
        if (hv_acpi_dev == NULL)
                return -ENODEV;

        return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
        int i;
        for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
                sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
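
/*
 * Helpers for decoding a channel's monitor ID.  The trigger_group, latency
 * and parameter tables in the host-shared struct hv_monitor_page are indexed
 * by a group (monitorid / 32) and an offset within that group (monitorid % 32),
 * which the helpers below compute from the channel's offer message.
 */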
static u8 channel_monitor_group(struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);
        return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);
        return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
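
/*
 * The *_show routines below back the per-device sysfs attributes.  Each one
 * reports either channel/offer state, monitor page statistics, or a snapshot
 * of the inbound/outbound ring buffer, and returns -ENODEV if no channel is
 * attached to the device.
 */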
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
                             struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
                              struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
                             struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        char alias_name[VMBUS_ALIAS_LEN + 1];

        print_alias_name(hv_dev, alias_name);
        return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t server_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);
static ssize_t client_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
                                    struct device_attribute *dev_attr,
                                    char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
                                          struct device_attribute *dev_attr,
                                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
                                 struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
                                        struct device_attribute *dev_attr,
                                        char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;

        if (!hv_dev->channel)
                return -ENODEV;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_state.attr,
        &dev_attr_monitor_id.attr,
        &dev_attr_class_id.attr,
        &dev_attr_device_id.attr,
        &dev_attr_modalias.attr,
        &dev_attr_server_monitor_pending.attr,
        &dev_attr_client_monitor_pending.attr,
        &dev_attr_server_monitor_latency.attr,
        &dev_attr_client_monitor_latency.attr,
        &dev_attr_server_monitor_conn_id.attr,
        &dev_attr_client_monitor_conn_id.attr,
        &dev_attr_out_intr_mask.attr,
        &dev_attr_out_read_index.attr,
        &dev_attr_out_write_index.attr,
        &dev_attr_out_read_bytes_avail.attr,
        &dev_attr_out_write_bytes_avail.attr,
        &dev_attr_in_intr_mask.attr,
        &dev_attr_in_read_index.attr,
        &dev_attr_in_write_index.attr,
        &dev_attr_in_read_bytes_avail.attr,
        &dev_attr_in_write_bytes_avail.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vmbus);
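
/*
 * Example (the instance number is illustrative): with the vmbus_0_%d naming
 * used in vmbus_device_register() below, the outbound ring state of the first
 * child device can be read from
 *
 *      /sys/bus/vmbus/devices/vmbus_0_1/out_read_bytes_avail
 */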

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rules and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
        struct hv_device *dev = device_to_hv_device(device);
        int ret;
        char alias_name[VMBUS_ALIAS_LEN + 1];

        print_alias_name(dev, alias_name);
        ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
        return ret;
}

static uuid_le null_guid;

static inline bool is_null_guid(const __u8 *guid)
{
        if (memcmp(guid, &null_guid, sizeof(uuid_le)))
                return false;
        return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
                                        const struct hv_vmbus_device_id *id,
                                        __u8 *guid)
{
        for (; !is_null_guid(id->guid); id++)
                if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
                        return id;

        return NULL;
}

/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct hv_device *hv_dev = device_to_hv_device(device);

        if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b))
                return 1;

        return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
        int ret = 0;
        struct hv_driver *drv =
                        drv_to_hv_drv(child_device->driver);
        struct hv_device *dev = device_to_hv_device(child_device);
        const struct hv_vmbus_device_id *dev_id;

        dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b);
        if (drv->probe) {
                ret = drv->probe(dev, dev_id);
                if (ret != 0)
                        pr_err("probe failed for device %s (%d)\n",
                               dev_name(child_device), ret);
        } else {
                pr_err("probe not set for driver %s\n",
                       dev_name(child_device));
                ret = -ENODEV;
        }
        return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
        struct hv_driver *drv = drv_to_hv_drv(child_device->driver);
        struct hv_device *dev = device_to_hv_device(child_device);

        if (drv->remove)
                drv->remove(dev);
        else
                pr_err("remove not set for driver %s\n",
                       dev_name(child_device));

        return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        /* The device may not be attached yet */
        if (!child_device->driver)
                return;

        drv = drv_to_hv_drv(child_device->driver);

        if (drv->shutdown)
                drv->shutdown(dev);

        return;
}

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
        struct hv_device *hv_dev = device_to_hv_device(device);

        kfree(hv_dev);
}

/* The one and only one */
static struct bus_type hv_bus = {
        .name =         "vmbus",
        .match =        vmbus_match,
        .shutdown =     vmbus_shutdown,
        .remove =       vmbus_remove,
        .probe =        vmbus_probe,
        .uevent =       vmbus_uevent,
        .dev_groups =   vmbus_groups,
};

static const char *driver_name = "hyperv";
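
/*
 * Messages taken off the SynIC message page in tasklet context are copied
 * into an onmessage_work_context and handed to vmbus_connection.work_queue,
 * so that vmbus_onmessage() runs in a context that is allowed to sleep.
 */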
struct onmessage_work_context {
        struct work_struct work;
        struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
        struct onmessage_work_context *ctx;

        ctx = container_of(work, struct onmessage_work_context,
                           work);
        vmbus_onmessage(&ctx->msg);
        kfree(ctx);
}
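
/*
 * vmbus_on_msg_dpc - tasklet that drains this CPU's SynIC message slot for
 * VMBUS_MESSAGE_SINT, deferring each message to the work queue above.
 */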
static void vmbus_on_msg_dpc(unsigned long data)
{
        int cpu = smp_processor_id();
        void *page_addr = hv_context.synic_message_page[cpu];
        struct hv_message *msg = (struct hv_message *)page_addr +
                                  VMBUS_MESSAGE_SINT;
        struct onmessage_work_context *ctx;

        while (1) {
                if (msg->header.message_type == HVMSG_NONE) {
                        /* no msg */
                        break;
                } else {
                        ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
                        if (ctx == NULL)
                                continue;
                        INIT_WORK(&ctx->work, vmbus_onmessage_work);
                        memcpy(&ctx->msg, msg, sizeof(*msg));
                        queue_work(vmbus_connection.work_queue, &ctx->work);
                }

                msg->header.message_type = HVMSG_NONE;

                /*
                 * Make sure the write to MessageType (ie set to
                 * HVMSG_NONE) happens before we read the
                 * MessagePending and EOMing. Otherwise, the EOMing
                 * will not deliver any more messages since there is
                 * no empty slot
                 */
                mb();

                if (msg->header.message_flags.msg_pending) {
                        /*
                         * This will cause message queue rescan to
                         * possibly deliver another msg from the
                         * hypervisor
                         */
                        wrmsrl(HV_X64_MSR_EOM, 0);
                }
        }
}
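
/*
 * vmbus_isr - interrupt handler for the VMBus IRQ.  Checks this CPU's SynIC
 * event page (pre-Win8 hosts only signal bit 0 of the shared event flags)
 * and message page, and schedules the per-CPU event tasklet and/or the
 * message DPC accordingly.
 */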
static irqreturn_t vmbus_isr(int irq, void *dev_id)
{
        int cpu = smp_processor_id();
        void *page_addr;
        struct hv_message *msg;
        union hv_synic_event_flags *event;
        bool handled = false;

        page_addr = hv_context.synic_event_page[cpu];
        if (page_addr == NULL)
                return IRQ_NONE;

        event = (union hv_synic_event_flags *)page_addr +
                                        VMBUS_MESSAGE_SINT;
        /*
         * Check for events before checking for messages. This is the order
         * in which events and messages are checked in Windows guests on
         * Hyper-V, and the Windows team suggested we do the same.
         */
        if ((vmbus_proto_version == VERSION_WS2008) ||
            (vmbus_proto_version == VERSION_WIN7)) {

                /* Since we are a child, we only need to check bit 0 */
                if (sync_test_and_clear_bit(0,
                        (unsigned long *) &event->flags32[0])) {
                        handled = true;
                }
        } else {
                /*
                 * Our host is win8 or above. The signaling mechanism
                 * has changed and we can directly look at the event page.
                 * If bit n is set then we have an interrupt on the channel
                 * whose id is n.
                 */
                handled = true;
        }

        if (handled)
                tasklet_schedule(hv_context.event_dpc[cpu]);

        page_addr = hv_context.synic_message_page[cpu];
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

        /* Check if there are actual msgs to be processed */
        if (msg->header.message_type != HVMSG_NONE) {
                handled = true;
                tasklet_schedule(&msg_dpc);
        }

        if (handled)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

/*
 * vmbus interrupt flow handler:
 * vmbus interrupts can concurrently occur on multiple CPUs and
 * can be handled concurrently.
 */
static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
{
        kstat_incr_irqs_this_cpu(irq, desc);

        desc->action->handler(irq, desc->action->dev_id);
}

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *      - initialize the vmbus driver context
 *      - invoke the vmbus hv main init routine
 *      - get the irq resource
 *      - retrieve the channel offers
 */
static int vmbus_bus_init(int irq)
{
        int ret;

        /* Hypervisor initialization...setup hypercall page..etc */
        ret = hv_init();
        if (ret != 0) {
                pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
                return ret;
        }

        tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);

        ret = bus_register(&hv_bus);
        if (ret)
                goto err_cleanup;

        ret = request_irq(irq, vmbus_isr, 0, driver_name, hv_acpi_dev);
        if (ret != 0) {
                pr_err("Unable to request IRQ %d\n",
                       irq);
                goto err_unregister;
        }

        /*
         * Vmbus interrupts can be handled concurrently on
         * different CPUs. Establish an appropriate interrupt flow
         * handler that can support this model.
         */
        irq_set_handler(irq, vmbus_flow_handler);

        /*
         * Register our interrupt handler.
         */
        hv_register_vmbus_handler(irq, vmbus_isr);

        ret = hv_synic_alloc();
        if (ret)
                goto err_alloc;
        /*
         * Initialize the per-cpu interrupt state and
         * connect to the host.
         */
        on_each_cpu(hv_synic_init, NULL, 1);

        ret = vmbus_connect();
        if (ret)
                goto err_alloc;

        vmbus_request_offers();

        return 0;

err_alloc:
        hv_synic_free();
        free_irq(irq, hv_acpi_dev);

err_unregister:
        bus_unregister(&hv_bus);

err_cleanup:
        hv_cleanup();

        return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the driver
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the Hyper-V vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
        int ret;

        pr_info("registering driver %s\n", hv_driver->name);

        ret = vmbus_exists();
        if (ret < 0)
                return ret;

        hv_driver->driver.name = hv_driver->name;
        hv_driver->driver.owner = owner;
        hv_driver->driver.mod_name = mod_name;
        hv_driver->driver.bus = &hv_bus;

        ret = driver_register(&hv_driver->driver);

        return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);

/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
        pr_info("unregistering driver %s\n", hv_driver->name);

        if (!vmbus_exists())
                driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
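
/*
 * Illustrative usage sketch (hypothetical names, not part of this file).
 * A child driver fills in a struct hv_driver and registers it through the
 * vmbus_driver_register() wrapper in <linux/hyperv.h>, which invokes
 * __vmbus_driver_register() with THIS_MODULE and KBUILD_MODNAME:
 *
 *      static const struct hv_vmbus_device_id example_id_table[] = {
 *              { .guid = { ...device interface GUID bytes... } },
 *              { },    (terminating null-GUID entry, see is_null_guid())
 *      };
 *
 *      static struct hv_driver example_drv = {
 *              .name     = "hv_example",
 *              .id_table = example_id_table,
 *              .probe    = example_probe,
 *              .remove   = example_remove,
 *      };
 *
 * Module init/exit then call vmbus_driver_register(&example_drv) and
 * vmbus_driver_unregister(&example_drv) respectively.
 */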

/*
 * vmbus_device_create - Creates a new child device object to be
 * registered on the vmbus.
 */
struct hv_device *vmbus_device_create(uuid_le *type,
                                      uuid_le *instance,
                                      struct vmbus_channel *channel)
{
        struct hv_device *child_device_obj;

        child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
        if (!child_device_obj) {
                pr_err("Unable to allocate device object for child device\n");
                return NULL;
        }

        child_device_obj->channel = channel;
        memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
        memcpy(&child_device_obj->dev_instance, instance,
               sizeof(uuid_le));

        return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
        int ret = 0;
        static atomic_t device_num = ATOMIC_INIT(0);

        dev_set_name(&child_device_obj->device, "vmbus_0_%d",
                     atomic_inc_return(&device_num));

        child_device_obj->device.bus = &hv_bus;
        child_device_obj->device.parent = &hv_acpi_dev->dev;
        child_device_obj->device.release = vmbus_device_release;

        /*
         * Register with the LDM. This will kick off the driver/device
         * binding...which will eventually call vmbus_match() and vmbus_probe()
         */
        ret = device_register(&child_device_obj->device);

        if (ret)
                pr_err("Unable to register child device\n");
        else
                pr_debug("child device %s registered\n",
                         dev_name(&child_device_obj->device));

        return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
        pr_debug("child device %s unregistered\n",
                 dev_name(&device_obj->device));

        /*
         * Kick off the process of unregistering the device.
         * This will call vmbus_remove() and eventually vmbus_device_release()
         */
        device_unregister(&device_obj->device);
}

/*
 * VMBUS is an acpi enumerated device. Get the IRQ information
 * from DSDT.
 */
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq)
{
        if (res->type == ACPI_RESOURCE_TYPE_IRQ) {
                struct acpi_resource_irq *irqp;

                irqp = &res->data.irq;

                *((unsigned int *)irq) = irqp->interrupts[0];
        }

        return AE_OK;
}

static int vmbus_acpi_add(struct acpi_device *device)
{
        acpi_status result;

        hv_acpi_dev = device;

        result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                     vmbus_walk_resources, &irq);

        if (ACPI_FAILURE(result)) {
                complete(&probe_event);
                return -ENODEV;
        }
        complete(&probe_event);
        return 0;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
        {"VMBUS", 0},
        {"VMBus", 0},
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
        .name = "vmbus",
        .ids = vmbus_acpi_device_ids,
        .ops = {
                .add = vmbus_acpi_add,
        },
};
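
/*
 * hv_acpi_init - module entry point.  Bails out unless the kernel is running
 * as a Hyper-V guest, registers the ACPI driver above so that vmbus_acpi_add()
 * can discover the VMBus IRQ from the DSDT, waits for that probe to complete,
 * and then brings the bus up via vmbus_bus_init().
 */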
static int __init hv_acpi_init(void)
{
        int ret, t;

        if (x86_hyper != &x86_hyper_ms_hyperv)
                return -ENODEV;

        init_completion(&probe_event);

        /*
         * Get irq resources first.
         */
        ret = acpi_bus_register_driver(&vmbus_acpi_driver);

        if (ret)
                return ret;

        t = wait_for_completion_timeout(&probe_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        if (irq <= 0) {
                ret = -ENODEV;
                goto cleanup;
        }

        ret = vmbus_bus_init(irq);
        if (ret)
                goto cleanup;

        return 0;

cleanup:
        acpi_bus_unregister_driver(&vmbus_acpi_driver);
        hv_acpi_dev = NULL;
        return ret;
}

static void __exit vmbus_exit(void)
{
        free_irq(irq, hv_acpi_dev);
        vmbus_free_channels();
        bus_unregister(&hv_bus);
        hv_cleanup();
        acpi_bus_unregister_driver(&vmbus_acpi_driver);
}


MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);