vmbus_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include "hyperv_vmbus.h"
static struct acpi_device  *hv_acpi_dev;

static struct tasklet_struct msg_dpc;
static struct completion probe_event;
static int irq;

static void hyperv_report_panic(struct pt_regs *regs)
{
	static bool panic_reported;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	regs = current_pt_regs();

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	hyperv_report_panic(regs);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

struct resource *hyperv_mmio;

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;

	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}

static u8 channel_monitor_group(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}
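
/*
 * Worked example (illustrative values, not taken from the code above):
 * a channel whose offer carries monitorid 77 lands in trigger group
 * 77 / 32 = 2 at bit offset 77 % 32 = 13, so its pending state is bit 13
 * of monitor_page->trigger_group[2].pending.
 */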

static u32 channel_pending(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
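
/*
 * Illustrative shell usage (the device path is an example, not taken from
 * this file): each attribute above appears under
 * /sys/bus/vmbus/devices/<device>/, so e.g.
 *
 *	cat /sys/bus/vmbus/devices/vmbus_1/monitor_id
 *
 * prints the channel's monitor id, while the server_monitor_* attributes
 * read monitor page 0 and the client_monitor_* attributes read page 1.
 */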

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;

	if (!hv_dev->channel)
		return -ENODEV;
	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
		channel->offermsg.child_relid, channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
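
/*
 * Illustrative output (values are made up): a device whose primary channel
 * has relid 15 on virtual processor 0, with two subchannels on VPs 1 and 2,
 * would read back as:
 *
 *	15:0
 *	16:1
 *	17:2
 */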

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rules and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}
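
/*
 * Example (the GUID bytes are hypothetical): for a device whose type GUID
 * starts with the bytes 63 61 58 6b ..., the uevent carries
 *
 *	MODALIAS=vmbus:6361586b...
 *
 * udev hands that string to modprobe, which matches it against the vmbus:
 * aliases that file2alias generates from each driver's MODULE_DEVICE_TABLE.
 */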

static const uuid_le null_guid;

static inline bool is_null_guid(const __u8 *guid)
{
	if (memcmp(guid, &null_guid, sizeof(uuid_le)))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(
					const struct hv_vmbus_device_id *id,
					const __u8 *guid)
{
	for (; !is_null_guid(id->guid); id++)
		if (!memcmp(&id->guid, guid, sizeof(uuid_le)))
			return id;

	return NULL;
}
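
/*
 * The walk above relies on each driver terminating its table with an
 * all-zero GUID. A minimal sketch of such a table (HV_NIC_GUID is the type
 * GUID the netvsc driver uses; any valid GUID initializer works):
 *
 *	static const struct hv_vmbus_device_id id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },			// null GUID terminator
 *	};
 *	MODULE_DEVICE_TABLE(vmbus, id_table);
 */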

/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);
	u32 relid = dev->channel->offermsg.child_relid;

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
		else {
			hv_process_channel_removal(dev->channel, relid);
			pr_err("remove not set for driver %s\n",
			       dev_name(child_device));
		}
	} else {
		/*
		 * We don't have a driver for this device; deal with the
		 * rescind message by removing the channel.
		 */
		hv_process_channel_removal(dev->channel, relid);
	}

	return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);

	return;
}

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);

	kfree(hv_dev);
}

/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =	vmbus_match,
	.shutdown =	vmbus_shutdown,
	.remove =	vmbus_remove,
	.probe =	vmbus_probe,
	.uevent =	vmbus_uevent,
	.dev_groups =	vmbus_groups,
};

struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
{
	struct clock_event_device *dev = hv_context.clk_evt[cpu];

	if (dev->event_handler)
		dev->event_handler(dev);

	msg->header.message_type = HVMSG_NONE;

	/*
	 * Make sure the write to MessageType (ie set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}

static void vmbus_on_msg_dpc(unsigned long data)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;

	while (1) {
		if (msg->header.message_type == HVMSG_NONE)
			/* no msg */
			break;

		hdr = (struct vmbus_channel_message_header *)msg->u.payload;

		if (hdr->msgtype >= CHANNELMSG_COUNT) {
			WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
			goto msg_handled;
		}

		entry = &channel_message_table[hdr->msgtype];
		if (entry->handler_type == VMHT_BLOCKING) {
			ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
			if (ctx == NULL)
				/*
				 * Bail out rather than retry: looping here
				 * would spin forever, since the message slot
				 * is never marked HVMSG_NONE on this path.
				 */
				return;
			INIT_WORK(&ctx->work, vmbus_onmessage_work);
			memcpy(&ctx->msg, msg, sizeof(*msg));

			queue_work(vmbus_connection.work_queue, &ctx->work);
		} else
			entry->message_handler(hdr);

msg_handled:
		msg->header.message_type = HVMSG_NONE;

		/*
		 * Make sure the write to MessageType (ie set to
		 * HVMSG_NONE) happens before we read the
		 * MessagePending and EOMing. Otherwise, the EOMing
		 * will not deliver any more messages since there is
		 * no empty slot
		 */
		mb();

		if (msg->header.message_flags.msg_pending) {
			/*
			 * This will cause message queue rescan to
			 * possibly deliver another msg from the
			 * hypervisor
			 */
			wrmsrl(HV_X64_MSR_EOM, 0);
		}
	}
}

static void vmbus_isr(void)
{
	int cpu = smp_processor_id();
	void *page_addr;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	page_addr = hv_context.synic_event_page[cpu];
	if (page_addr == NULL)
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */
	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0,
			(unsigned long *) &event->flags32[0])) {
			handled = true;
		}
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		tasklet_schedule(hv_context.event_dpc[cpu]);

	page_addr = hv_context.synic_message_page[cpu];
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, cpu);
		else
			tasklet_schedule(&msg_dpc);
	}
}

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- get the irq resource
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(int irq)
{
	int ret;

	/* Hypervisor initialization...setup hypercall page..etc */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);

	ret = bus_register(&hv_bus);
	if (ret)
		goto err_cleanup;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	on_each_cpu(hv_synic_init, NULL, 1);

	ret = vmbus_connect();
	if (ret)
		goto err_alloc;

	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_disable();

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		register_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_block);
	}

	vmbus_request_offers();

	return 0;

err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);

err_cleanup:
	hv_cleanup();

	return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
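
/*
 * Minimal usage sketch (driver name and callbacks are hypothetical). Client
 * drivers normally call this through the vmbus_driver_register() wrapper in
 * <linux/hyperv.h>, which fills in THIS_MODULE and KBUILD_MODNAME:
 *
 *	static struct hv_driver sample_drv = {
 *		.name = "sample",
 *		.id_table = id_table,	// null-GUID-terminated, see above
 *		.probe = sample_probe,
 *		.remove = sample_remove,
 *	};
 *
 *	static int __init sample_init(void)
 *	{
 *		return vmbus_driver_register(&sample_drv);
 *	}
 */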

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists())
		driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));

	return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	int ret = 0;

	dev_set_name(&child_device_obj->device, "vmbus_%d",
		     child_device_obj->channel->id);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);

	if (ret)
		pr_err("Unable to register child device\n");
	else
		pr_debug("child device %s registered\n",
			 dev_name(&child_device_obj->device));

	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}

/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {
	case ACPI_RESOURCE_TYPE_IRQ:
		irq = res->data.irq.interrupts[0];
		return AE_OK;

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if ((*old_res)->end < new_res->start) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
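
/*
 * Insertion example (addresses are illustrative): starting from an empty
 * list, adding the window [0x100000, 0x1fffff] and then [0x200000, 0x2fffff]
 * leaves hyperv_mmio pointing at the 0x200000 range, whose ->sibling is the
 * 0x100000 range. A new node is linked in front of the first existing node
 * that lies entirely below it, so the list stays sorted from the highest
 * window down.
 */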

static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter;
	resource_size_t range_min, range_max, start, local_min, local_max;
	const char *dev_n = dev_name(&device_obj->device);
	u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
	int i;

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;

		/* If this range overlaps the frame buffer, split it into
		   two tries. */
		for (i = 0; i < 2; i++) {
			local_min = range_min;
			local_max = range_max;
			if (fb_overlap_ok || (range_min >= fb_end) ||
			    (range_max <= screen_info.lfb_base)) {
				i++;
			} else {
				if ((range_min <= screen_info.lfb_base) &&
				    (range_max >= screen_info.lfb_base)) {
					/*
					 * The frame buffer is in this window,
					 * so trim this into the part that
					 * precedes the frame buffer.
					 */
					local_max = screen_info.lfb_base - 1;
					range_min = fb_end;
				} else {
					range_min = fb_end;
					continue;
				}
			}
			start = (local_min + align - 1) & ~(align - 1);
			for (; start + size - 1 <= local_max; start += align) {
				*new = request_mem_region_exclusive(start, size,
								    dev_n);
				if (*new)
					return 0;
			}
		}
	}

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
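
/*
 * Usage sketch (variable names are hypothetical; hyperv_fb is a real caller
 * of this interface): request 8MB of MMIO anywhere in the granted windows
 * (min 0, max -1 means no bound), page-aligned, refusing to overlap the
 * legacy frame buffer:
 *
 *	struct resource *fb_mmio;
 *	int ret = vmbus_allocate_mmio(&fb_mmio, hdev, 0, -1,
 *				      0x800000, PAGE_SIZE, false);
 *	if (ret)
 *		return ret;	// no window large enough
 */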

static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
					vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio)
			break;
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};

static void hv_kexec_handler(void)
{
	int cpu;

	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload();
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	hv_cleanup();
}

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload();
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	hv_synic_cleanup(NULL);
	hv_cleanup();
}

static int __init hv_acpi_init(void)
{
	int ret, t;

	if (x86_hyper != &x86_hyper_ms_hyperv)
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get irq resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (irq <= 0) {
		ret = -ENODEV;
		goto cleanup;
	}

	ret = vmbus_bus_init(irq);
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}

static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	tasklet_kill(&msg_dpc);
	vmbus_free_channels();
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}
	bus_unregister(&hv_bus);
	hv_cleanup();
	for_each_online_cpu(cpu) {
		tasklet_kill(hv_context.event_dpc[cpu]);
		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
	}
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	if (vmbus_proto_version > VERSION_WIN7)
		cpu_hotplug_enable();
}

MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);