vmbus_drv.c

/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include "hyperv_vmbus.h"

static struct acpi_device  *hv_acpi_dev;

static struct completion probe_event;
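
/*
 * Report panic context to the hypervisor: the instruction pointer and a
 * few general-purpose registers are written into the synthetic crash MSRs
 * (HV_X64_MSR_CRASH_P0..P4), then HV_X64_MSR_CRASH_CTL is written to tell
 * Hyper-V that crash data is available.  Only the first report wins; any
 * later call is ignored.
 */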
static void hyperv_report_panic(struct pt_regs *regs)
{
	static bool panic_reported;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	regs = current_pt_regs();

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

struct resource *hyperv_mmio;

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}
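
/*
 * Each byte of the device-type GUID is rendered as two hex characters,
 * so the "vmbus:..." alias built below is twice the GUID size (32
 * characters for a 16-byte GUID).
 */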
#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;

	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
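
/*
 * Monitor IDs are grouped 32 to a trigger group on the shared monitor
 * page, so a channel's group is monitorid / 32 and its slot within the
 * group is monitorid % 32.
 */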
static u8 channel_monitor_group(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
		channel->offermsg.child_relid, channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus);
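
/*
 * These attributes surface under /sys/bus/vmbus/devices/<device>/ once a
 * device registers; reading "modalias" from one of those directories, for
 * example, returns the "vmbus:<guid-as-hex>" string built by
 * print_alias_name().
 */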
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. udev will then look at its rules
 * and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}

static const uuid_le null_guid;

static inline bool is_null_guid(const uuid_le *guid)
{
	if (uuid_le_cmp(*guid, null_guid))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
					const struct hv_vmbus_device_id *id,
					const uuid_le *guid)
{
	for (; !is_null_guid(&id->guid); id++)
		if (!uuid_le_cmp(id->guid, *guid))
			return id;

	return NULL;
}
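
/*
 * A minimal sketch (illustrative, not from this file) of the id table a
 * vmbus driver supplies: entries are matched by GUID, and the array is
 * terminated by a zeroed (null-GUID) entry, which is what the loop above
 * stops on:
 *
 *	static const struct hv_vmbus_device_id id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 */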
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);
	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);

	return;
}

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_process_channel_removal(channel,
				   channel->offermsg.child_relid);
	kfree(hv_dev);
}

/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =	vmbus_match,
	.shutdown =	vmbus_shutdown,
	.remove =	vmbus_remove,
	.probe =	vmbus_probe,
	.uevent =	vmbus_uevent,
	.dev_groups =	vmbus_groups,
};

struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
	struct clock_event_device *dev = hv_context.clk_evt[cpu];

	if (dev->event_handler)
		dev->event_handler(dev);

	vmbus_signal_eom(msg);
}
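
/*
 * Tasklet handler for the per-cpu message page: decode the channel message
 * header and dispatch it.  Handlers marked VMHT_BLOCKING may sleep, so the
 * message is copied into an onmessage_work_context and deferred to the
 * connection work queue; everything else is handled inline.  In either
 * case the message slot is released back to the hypervisor via
 * vmbus_signal_eom().
 */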
void vmbus_on_msg_dpc(unsigned long data)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;

	if (msg->header.message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
		goto msg_handled;
	}

	entry = &channel_message_table[hdr->msgtype];
	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, msg, sizeof(*msg));

		queue_work(vmbus_connection.work_queue, &ctx->work);
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg);
}
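
/*
 * Top-level VMBus interrupt handler: first check (and, on pre-Win8 hosts,
 * clear) the event flags and schedule the per-cpu event tasklet, then
 * check the message page and route either to the timer expiration path
 * or to the message tasklet.
 */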
static void vmbus_isr(void)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	if (page_addr == NULL)
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */
	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);

	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, cpu);
		else
			tasklet_schedule(hv_context.msg_dpc[cpu]);
	}
}
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	/* Hypervisor initialization: set up the hypercall page, etc. */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		goto err_cleanup;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	on_each_cpu(hv_synic_init, NULL, 1);

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_disable();

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		register_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_block);
	}

	vmbus_request_offers();

	return 0;

err_connect:
	on_each_cpu(hv_synic_cleanup, NULL, 1);
err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);

err_cleanup:
	hv_cleanup();

	return ret;
}
/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()'
 * call and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner,
			    const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
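
/*
 * Callers normally go through the vmbus_driver_register() wrapper in
 * <linux/hyperv.h>, which fills in THIS_MODULE and KBUILD_MODNAME.  A
 * minimal sketch (illustrative only; field values are hypothetical):
 *
 *	static struct hv_driver my_drv = {
 *		.name = "my_drv",
 *		.id_table = id_table,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&my_drv);
 */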
/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call
 * to vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists())
		driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	int ret = 0;

	dev_set_name(&child_device_obj->device, "vmbus_%d",
		     child_device_obj->channel->id);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);

	if (ret)
		pr_err("Unable to register child device\n");
	else
		pr_debug("child device %s registered\n",
			 dev_name(&child_device_obj->device));

	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}

/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
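/*
 * 0xfed40000 is the architecturally defined base address of the TPM (TIS)
 * register window; MMIO ranges reported to VMBus are truncated below so
 * they never cover a virtual TPM living there.
 */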
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * Stick ranges from higher in address space at the front of the list.
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->end < new_res->start) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}

static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}
/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter;
	resource_size_t range_min, range_max, start, local_min, local_max;
	const char *dev_n = dev_name(&device_obj->device);
	u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
	int i;

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;

		/* If this range overlaps the frame buffer, split it into
		   two tries. */
		for (i = 0; i < 2; i++) {
			local_min = range_min;
			local_max = range_max;
			if (fb_overlap_ok || (range_min >= fb_end) ||
			    (range_max <= screen_info.lfb_base)) {
				i++;
			} else {
				if ((range_min <= screen_info.lfb_base) &&
				    (range_max >= screen_info.lfb_base)) {
					/*
					 * The frame buffer is in this window,
					 * so trim this into the part that
					 * precedes the frame buffer.
					 */
					local_max = screen_info.lfb_base - 1;
					range_min = fb_end;
				} else {
					range_min = fb_end;
					continue;
				}
			}
			start = (local_min + align - 1) & ~(align - 1);
			for (; start + size - 1 <= local_max; start += align) {
				*new = request_mem_region_exclusive(start, size,
								    dev_n);
				if (*new)
					return 0;
			}
		}
	}

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
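
/*
 * A usage sketch (parameter values are illustrative, not mandated by this
 * file): a framebuffer-style driver asking for 8MB anywhere in guest
 * physical space, 1MB aligned, allowed to overlap the legacy frame buffer:
 *
 *	ret = vmbus_allocate_mmio(&res, hdev, 0, -1,
 *				  0x800000, 0x100000, true);
 */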
/**
 * vmbus_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
int vmbus_cpu_number_to_vp_number(int cpu_number)
{
	return hv_context.vp_index[cpu_number];
}
EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number);

static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio)
			break;
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};

static void hv_kexec_handler(void)
{
	int cpu;

	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload(false);
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	hv_cleanup();
};

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	hv_synic_cleanup(NULL);
	hv_cleanup();
};

static int __init hv_acpi_init(void)
{
	int ret, t;

	if (x86_hyper != &x86_hyper_ms_hyperv)
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);

	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}

static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu)
		tasklet_kill(hv_context.msg_dpc[cpu]);
	vmbus_free_channels();
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}
	bus_unregister(&hv_bus);
	hv_cleanup();
	for_each_online_cpu(cpu) {
		tasklet_kill(hv_context.event_dpc[cpu]);
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	}
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_enable();
}

MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);