/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					 UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					 UNCORE_PMC_IDX_MAX_FREERUNNING)
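
/*
 * Derived layout of the counter index space (this follows directly from
 * the macros above): indices 0-7 address the generic counters, index 8
 * (UNCORE_PMC_IDX_FIXED) the fixed counter, index 9
 * (UNCORE_PMC_IDX_FREERUNNING) the free running pseudo-counter, and
 * UNCORE_PMC_IDX_MAX evaluates to 10.
 */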

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
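
/*
 * Illustrative round trip through the packing macros above; the numbers
 * are made up, not a real device encoding:
 *
 *   data = UNCORE_PCI_DEV_FULL_DATA(0x1e, 3, 2, 0)
 *        = (0x1e << 24) | (3 << 16) | (2 << 8) | 0 = 0x1e030200
 *   UNCORE_PCI_DEV_DEV(data)  == 0x1e
 *   UNCORE_PCI_DEV_FUNC(data) == 0x03
 *   UNCORE_PCI_DEV_TYPE(data) == 0x02
 *   UNCORE_PCI_DEV_IDX(data)  == 0x00
 */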

#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	unsigned msr_offset;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	unsigned *msr_offsets;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu; /* for custom pmu ops */
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};
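
/*
 * A minimal sketch of an MSR-based enable_event callback, assuming a
 * hypothetical FOO_PMON_CTL_EN enable bit (the real implementations live
 * in uncore_snb.c / uncore_snbep.c and differ per platform):
 *
 *   static void foo_uncore_msr_enable_event(struct intel_uncore_box *box,
 *                                           struct perf_event *event)
 *   {
 *           wrmsrl(event->hw.config_base,
 *                  event->hw.config | FOO_PMON_CTL_EN);
 *   }
 */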

struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	int func_id;
	bool registered;
	atomic_t activeboxes;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int pci_phys_id;
	int pkgid;	/* Logical package ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void *io_addr;
	struct intel_uncore_extra_reg shared_regs[0];
};
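
/*
 * shared_regs[] is a variable-size tail: presumably the allocator sizes
 * each box as sizeof(*box) plus type->num_shared_regs copies of
 * struct intel_uncore_extra_reg, e.g. (a sketch, not the exact uncore.c
 * code):
 *
 *   box = kzalloc_node(sizeof(*box) + type->num_shared_regs *
 *                      sizeof(struct intel_uncore_extra_reg),
 *                      GFP_KERNEL, node);
 */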

/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0	0xf70
#define CFL_UNC_CBO_7_PER_CTR0		0xf76

#define UNCORE_BOX_FLAG_INITIATED	0
/* event config registers are 8 bytes apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8	1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_physid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf);

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{									\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),		\
	.config	= _config,						\
}
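
/*
 * Typical use (illustrative event name and encoding): declares a sysfs
 * event attribute whose show() handler prints the config string. The
 * array is terminated by an all-zero entry:
 *
 *   static struct uncore_event_desc snb_uncore_events[] = {
 *           INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
 *           { },
 *   };
 */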

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				      struct kobj_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
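
/*
 * Typical use (illustrative bit range): emits a kobj_attribute named
 * "event" whose show() prints "config:0-7", i.e. where the event code
 * lives inside perf_event_attr::config.
 *
 *   DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 */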

static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}
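
/*
 * Worked example with a hypothetical event_ctl base of 0xd8: without
 * UNCORE_BOX_FLAG_CTL_OFFS8 the control registers sit 4 bytes apart
 * (idx 2 -> 0xd8 + 2 * 4 = 0xe0); with the flag set they sit 8 bytes
 * apart (idx 2 -> 0xd8 + 2 * 8 = 0xe8).
 */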

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}
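
/*
 * Worked example with hypothetical numbers: a type with a linear
 * msr_offset of 0x10 and no msr_offsets table places box 2's registers
 * at base + 0x10 * 2 = base + 0x20; a type whose boxes are irregularly
 * spaced supplies a per-box msr_offsets[] table instead.
 */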

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

/*
 * In the uncore document, there is no event-code assigned to free running
 * counters. Some events need to be defined to indicate the free running
 * counters. The events are encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, which is the same
 * as the fixed counters.
 *
 * The umask-code is used to distinguish a fixed counter from a free
 * running counter, and to distinguish the different types of free running
 * counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, which starts from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It bears the format 0xXY.
 *   X stands for the type of free running counters, which starts from 1.
 *   Y stands for the index of free running counters of the same type,
 *   which starts from 0.
 *
 * For example, there are three types of IIO free running counters on
 * Skylake server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION
 * counters. The event-code for all the free running counters is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-code starts from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH counters. BANDWIDTH
 * is the second type, whose umask-code starts from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}
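
/*
 * Worked decode of the 'bw_in_port2' example from the comment above
 * (event=0xff, umask=0x22, so config = 0x22ff):
 *
 *   uncore_freerunning_idx(0x22ff)  = (0x22ff >> 8) & 0xf        = 2
 *   uncore_freerunning_type(0x22ff) = ((0x22 - 0x10) >> 4) & 0xf = 1
 *
 * i.e. the third counter (index 2) of the second type (BANDWIDTH).
 */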

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);
	unsigned int idx = uncore_freerunning_idx(event->attr.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       pmu->type->freerunning[type].box_offset * pmu->pmu_idx;
}
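
/*
 * Illustrative arithmetic with made-up freerunning_counters values:
 * with counter_base = 0xa40, counter_offset = 8 and box_offset = 0x20,
 * the third counter (idx 2) of box 1 reads from
 * 0xa40 + 8 * 2 + 0x20 * 1 = 0xa70.
 */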

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);
	unsigned int idx = uncore_freerunning_idx(event->attr.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}
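
/*
 * Decode examples (the values follow from the encoding comment earlier):
 * config 0x22ff has the fixed event-code 0xff and umask 0x22 >= 0x10, so
 * it is a free running event; config 0x00ff (umask 0x00) selects a fixed
 * counter instead, so it is not.
 */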

static inline void uncore_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->disable_box)
		box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->enable_box)
		box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->pkgid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);