uncore.c

#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");
static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}
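
/*
 * Look up the pci2phy map entry for a PCI segment, allocating a new one if
 * none exists yet.  The map lock is dropped around the allocation, so the
 * list is re-scanned afterwards in case another CPU inserted the entry in
 * the meantime; a now-unneeded allocation is freed at 'end'.
 */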
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	unsigned int pkgid = topology_logical_package_id(cpu);
	/*
	 * The unsigned check also catches the '-1' return value for
	 * non-existent mappings in the topology map.
	 */
	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared
	 * reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}
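
/*
 * Read the counter and fold the delta into the perf event count.  The shift
 * by (64 - counter width) truncates both the old and the new raw value to
 * the hardware counter width, so the delta stays correct across counter
 * wraparound.
 */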
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
/*
 * The overflow interrupt is unavailable for SandyBridge-EP and is broken
 * for SandyBridge, so we use an hrtimer to periodically poll the counters
 * and avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * Disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process.
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->pkgid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}
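
/*
 * Assign hardware counters to the collected events.  The fast path keeps an
 * event on its previously assigned counter when the constraint still allows
 * it and the counter is free; otherwise the generic perf constraint solver
 * (perf_assign_events) is used.  All acquired constraints are released again
 * on failure, or when called with assign == NULL for validation only.
 */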
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}

	return ret ? -EINVAL : 0;
}

static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
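
/*
 * Add an event to the box: collect it together with the already scheduled
 * events, compute a counter assignment, stop events that have to move to a
 * different counter and (re)start everything that is not explicitly kept
 * stopped (PERF_HES_ARCH).
 */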
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);

	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}
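
/*
 * event_init callback: validate the event attributes, bind the event to the
 * package box that corresponds to event->cpu and pre-compute the hardware
 * config.  Group members are additionally run through uncore_validate_group()
 * to make sure the whole group can be scheduled at once.
 */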
static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;
	/*
	 * The uncore PMU measures at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};
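
/*
 * Register one logical uncore PMU with the perf core.  The PMU is named
 * "uncore_<type>" (or "uncore_<type>_<idx>" for types with several boxes)
 * and, like any other perf PMU, appears under
 * /sys/bus/event_source/devices/, so its events can be counted with the
 * usual "uncore_<type>/<event>/" syntax from tools such as perf.
 */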
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups = pmu->type->attr_groups,
			.task_ctx_nr = perf_invalid_context,
			.event_init = uncore_pmu_event_init,
			.add = uncore_pmu_event_add,
			.del = uncore_pmu_event_del,
			.start = uncore_pmu_event_start,
			.stop = uncore_pmu_event_stop,
			.read = uncore_pmu_event_read,
			.module = THIS_MODULE,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;

	for (pkg = 0; pkg < max_packages; pkg++)
		kfree(pmu->boxes[pkg]);
	kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}
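
/*
 * Per-type setup: allocate one intel_uncore_pmu per box together with its
 * per-package box pointer array, derive the "unconstrained" event constraint
 * from the number of counters, and build the "events" sysfs attribute group
 * from the type's event descriptors.
 */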
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	size_t size;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_packages * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = setid ? i : -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		pmus[i].boxes = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			return -ENOMEM;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
				     sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			return -ENOMEM;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;

	return 0;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	pkg = topology_phys_to_logical_pkg(phys_id);
	if (pkg < 0)
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. We should check
	 * PCI slot and func to indicate the uncore box.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;
		const struct pci_device_id *ids = pci_drv->id_table;
		unsigned int devfn;

		while (ids && ids->vendor) {
			if ((ids->vendor == pdev->vendor) &&
			    (ids->device == pdev->device)) {
				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
				if (devfn == pdev->devfn) {
					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
					break;
				}
			}
			ids++;
		}

		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * for performance monitoring unit with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->pkgid = pkg;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[pkg] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[pkg] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}
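
/*
 * Remove a PCI uncore device.  Extra devices (those only stashed in
 * uncore_extra_pci_dev[]) are simply unhooked; for real boxes the
 * per-package slot is cleared and the PMU is unregistered once its last
 * active box goes away.
 */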
static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	pkg = topology_phys_to_logical_pkg(phys_id);

	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
				uncore_extra_pci_dev[pkg].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[pkg] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = max_packages * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[pkg];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}
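
/*
 * CPU hotplug offline callback.  If the outgoing CPU was the designated
 * event collector for its package, pick another online CPU in the package
 * (if any) and migrate the perf contexts there.  Then drop this CPU's
 * reference on every box of the package and tear a box down when its last
 * reference goes away.
 */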
static int uncore_event_cpu_offline(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg, target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		goto unref;
	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
	/* Clear the references */
	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
	return 0;
}
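
/*
 * Allocate any boxes this package is still missing.  Boxes are first
 * collected on a local list so that a mid-way allocation failure can undo
 * everything; only when all allocations have succeeded are they installed
 * in the pmus.
 */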
static int allocate_boxes(struct intel_uncore_type **types,
			  unsigned int pkg, unsigned int cpu)
{
	struct intel_uncore_box *box, *tmp;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	LIST_HEAD(allocated);
	int i;

	/* Try to allocate all required boxes */
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[pkg])
				continue;
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				goto cleanup;
			box->pmu = pmu;
			box->pkgid = pkg;
			list_add(&box->active_list, &allocated);
		}
	}
	/* Install them in the pmus */
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		box->pmu->boxes[pkg] = box;
	}
	return 0;

cleanup:
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		kfree(box);
	}
	return -ENOMEM;
}
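
/*
 * CPU hotplug online callback: make sure all boxes for this CPU's package
 * exist and are initialized, then claim the CPU as the package's event
 * collector if no other online CPU in the package is doing that job yet.
 */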
static int uncore_event_cpu_online(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, ret, pkg, target;

	pkg = topology_logical_package_id(cpu);
	ret = allocate_boxes(types, pkg, cpu);
	if (ret)
		return ret;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);
	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

#define X86_UNCORE_MODEL_MATCH(model, init) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
	void (*cpu_init)(void);
	int (*pci_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, ivbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hswep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
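
/*
 * Module init: match the running CPU against the model table, run the
 * model-specific MSR and PCI setup, and register CPU hotplug callbacks that
 * designate one event-collecting CPU per package.  Initialization succeeds
 * as long as at least one of the MSR or PCI parts comes up.
 */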
static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, ret;

	id = x86_match_cpu(intel_uncore_match);
	if (!id)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	max_packages = topology_max_packages();

	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (cret && pret)
		return -ENODEV;

	/* Install hotplug callbacks to setup the targets for each package */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
				"perf/x86/intel/uncore:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
	if (ret)
		goto err;
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
}
module_exit(intel_uncore_exit);