uncore.c

#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");

static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

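/*
 * Look up the pci2phy map entry for a PCI segment, allocating a new one
 * if none exists.  pci2phy_map_lock is a raw spinlock, so the GFP_KERNEL
 * allocation is done with the lock dropped; the list is then re-scanned
 * in case another caller inserted the same segment in the meantime.
 */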
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	unsigned int pkgid = topology_logical_package_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for
	 * non-existent mappings in the topology map.
	 */
	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * Generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for a fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared
	 * reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (uncore_pmc_fixed(hwc->idx)) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

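/*
 * Fold the delta since the last read into event->count.  The counter
 * values are shifted up to bit 63 and back again so that wraparound of
 * counters narrower than 64 bits is handled correctly without knowing
 * the absolute counter value.
 */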
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (uncore_pmc_freerunning(event->hw.idx))
		shift = 64 - uncore_freerunning_bits(box, event);
	else if (uncore_pmc_fixed(event->hw.idx))
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and broken
 * for SandyBridge, so we use an hrtimer to periodically poll the
 * counters and avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;

	/*
	 * Disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process.
	 */
	local_irq_save(flags);

	/*
	 * Handle boxes with an active event list as opposed to active
	 * counters.
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

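/*
 * Allocate a box together with its trailing array of shared extra
 * registers on the requested NUMA node and initialize the defaults:
 * no owning CPU, no package and the default hrtimer poll interval.
 */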
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->pkgid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

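/*
 * Collect the leader (and, if @dogrp is set, its active siblings that
 * belong to this box) into box->event_list.  Returns the new number of
 * collected events, or -EINVAL if the box would run out of counters.
 */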
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

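/*
 * Assign counters to the collected events.  The fast path keeps an
 * event on the counter it already occupies as long as its constraint
 * still allows that; otherwise perf_assign_events() solves the full
 * assignment from the per-event constraints.  Constraints are released
 * again on failure or when called without an @assign array (group
 * validation).
 */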
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}

	return ret ? -EINVAL : 0;
}

void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	/*
	 * A free running counter is read-only and always active.
	 * Use the current counter value as the start point.
	 * There is no overflow interrupt for free running counters,
	 * so use the hrtimer to periodically poll the counter and
	 * avoid overflow.
	 */
	if (uncore_pmc_freerunning(event->hw.idx)) {
		list_add_tail(&event->active_entry, &box->active_list);
		local64_set(&event->hw.prev_count,
			    uncore_read_counter(box, event));
		if (box->n_active++ == 0)
			uncore_pmu_start_hrtimer(box);
		return;
	}

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	/* Cannot disable a free running counter which is read-only */
	if (uncore_pmc_freerunning(hwc->idx)) {
		list_del(&event->active_entry);
		if (--box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
		uncore_perf_event_update(box, event);
		return;
	}

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

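/*
 * pmu::add callback: collect the new event into the box, recompute the
 * counter assignment, stop events that have to move to a different
 * counter, then reprogram and (re)start the events that may run.
 */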
int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	/*
	 * The free running counter is assigned in event_init().
	 * The free running counter event and the free running counter
	 * are 1:1 mapped. It doesn't need to be tracked in the event_list.
	 */
	if (uncore_pmc_freerunning(hwc->idx)) {
		if (flags & PERF_EF_START)
			uncore_pmu_event_start(event, 0);
		return 0;
	}

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	/*
	 * The event for a free running counter is not tracked by the
	 * event_list. There is no need to force event->hw.idx = -1 to
	 * reassign the counter, because the event and the free running
	 * counter are 1:1 mapped.
	 */
	if (uncore_pmc_freerunning(event->hw.idx))
		return;

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);

	uncore_perf_event_update(box, event);
}

/*
 * Validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	/* The free running counter is always active. */
	if (uncore_pmc_freerunning(event->hw.idx))
		return 0;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * The event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling.
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu.
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * If there is only one fixed counter, only the first pmu
		 * can access the fixed counter.
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else if (is_freerunning_event(event)) {
		if (!check_valid_freerunning_event(box, event))
			return -EINVAL;
		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
		/*
		 * The free running counter event and the free running
		 * counter are always 1:1 mapped.
		 * The free running counter is always active.
		 * Assign the free running counter here.
		 */
		event->hw.event_base = uncore_freerunning_counter(box, event);
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

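/*
 * Register one logical PMU with perf.  Types that do not provide their
 * own struct pmu template get the generic uncore callbacks.  The PMU
 * name is derived from the type name and, for types with more than one
 * box, the box index.
 */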
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
			.module		= THIS_MODULE,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;

	for (pkg = 0; pkg < max_packages; pkg++)
		kfree(pmu->boxes[pkg]);
	kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

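/*
 * Set up the per-type state: one intel_uncore_pmu per box with a
 * per-package box pointer array, the default "unconstrained" event
 * constraint, and an "events" attribute group built from the type's
 * event descriptions.
 */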
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	size_t size;
	int i, j;

	pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_packages * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = setid ? i : -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		pmus[i].boxes = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			goto err;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		struct {
			struct attribute_group group;
			struct attribute *attrs[];
		} *attr_group;

		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
				     GFP_KERNEL);
		if (!attr_group)
			goto err;

		attr_group->group.name = "events";
		attr_group->group.attrs = attr_group->attrs;

		for (j = 0; j < i; j++)
			attr_group->attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = &attr_group->group;
	}

	type->pmu_group = &uncore_pmu_attr_group;

	return 0;

err:
	for (i = 0; i < type->num_boxes; i++)
		kfree(pmus[i].boxes);
	kfree(pmus);

	return -ENOMEM;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Add a PCI uncore device.
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	pkg = topology_phys_to_logical_pkg(phys_id);
	if (pkg < 0)
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. Check the
	 * PCI slot and function to identify the uncore box.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;
		const struct pci_device_id *ids = pci_drv->id_table;
		unsigned int devfn;

		while (ids && ids->vendor) {
			if ((ids->vendor == pdev->vendor) &&
			    (ids->device == pdev->device)) {
				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
				if (devfn == pdev->devfn) {
					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
					break;
				}
			}
			ids++;
		}
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * For a performance monitoring unit with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->pkgid = pkg;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[pkg] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[pkg] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

	phys_id = uncore_pcibus_to_physid(pdev->bus);

	box = pci_get_drvdata(pdev);
	if (!box) {
		pkg = topology_phys_to_logical_pkg(phys_id);
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
				uncore_extra_pci_dev[pkg].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[box->pkgid] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = max_packages * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

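/*
 * Move the boxes of one uncore type in the affected package over from
 * @old_cpu to @new_cpu (or just install/clear the owning CPU when one
 * side is -1), migrating any active perf context along with them.
 */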
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[pkg];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static int uncore_event_cpu_offline(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg, target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		goto unref;
	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
	/* Clear the references */
	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
	return 0;
}

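/*
 * Allocate the boxes a package is still missing when a CPU of that
 * package comes online.  This is done in two phases so that either all
 * missing boxes are installed or, on allocation failure, none are.
 */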
static int allocate_boxes(struct intel_uncore_type **types,
			  unsigned int pkg, unsigned int cpu)
{
	struct intel_uncore_box *box, *tmp;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	LIST_HEAD(allocated);
	int i;

	/* Try to allocate all required boxes */
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[pkg])
				continue;
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				goto cleanup;
			box->pmu = pmu;
			box->pkgid = pkg;
			list_add(&box->active_list, &allocated);
		}
	}
	/* Install them in the pmus */
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		box->pmu->boxes[pkg] = box;
	}
	return 0;

cleanup:
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		kfree(box);
	}
	return -ENOMEM;
}

static int uncore_event_cpu_online(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, ret, pkg, target;

	pkg = topology_logical_package_id(cpu);
	ret = allocate_boxes(types, pkg, cpu);
	if (ret)
		return ret;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

#define X86_UNCORE_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
	void (*cpu_init)(void);
	int (*pci_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, ivbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hswep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);

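/*
 * Module init: match the CPU model, set up the PCI and/or MSR uncore
 * PMUs it declares, and install the CPU hotplug callbacks that pick the
 * event-collecting CPU for each package.
 */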
static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, ret;

	id = x86_match_cpu(intel_uncore_match);
	if (!id)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	max_packages = topology_max_packages();

	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (cret && pret)
		return -ENODEV;

	/* Install hotplug callbacks to setup the targets for each package */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
				"perf/x86/intel/uncore:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
	if (ret)
		goto err;
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
}
module_exit(intel_uncore_exit);