pmu.c

/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"

static struct kvm_arch_event_perf_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
	bool inexact;
} arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and arch_events array */
int fixed_pmc_events[] = {1, 0, 7};

static bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

	return pmu->counter_bitmask[pmc->type];
}

static inline bool pmc_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];

	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - base];

	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
{
	return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
}

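/*
 * Map a bit index from the IA32_PERF_GLOBAL_CTRL layout to the
 * corresponding counter: low bits select general-purpose counters,
 * bits from INTEL_PMC_IDX_FIXED upwards select fixed counters.
 */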
static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
	else
		return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
}

void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.apic)
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

static void trigger_pmi(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
			irq_work);
	struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
			arch.pmu);

	kvm_deliver_pmi(vcpu);
}

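/*
 * Overflow callback used when the guest did not request interrupt
 * delivery: latch the overflow into GLOBAL_STATUS and ask the vcpu to
 * service the counter via a KVM_REQ_PMU request.
 */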
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
		struct perf_sample_data *data, struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
		/*
		 * Inject PMI. If the vcpu was in guest mode during the NMI,
		 * the PMI can be injected on the next guest-mode entry.
		 * Otherwise we can't be sure the vcpu wasn't executing a hlt
		 * instruction at the time of the vmexit, and it won't
		 * re-enter guest mode until woken up. So we should wake it,
		 * but that is impossible from NMI context. Do it from irq
		 * work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

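/*
 * A guest counter reads as the saved counter value plus the delta
 * accumulated by the backing host perf event, masked to the width the
 * guest was told the counter has.
 */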
static u64 read_pmc(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;

	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);

	/* FIXME: Scaling needed? */

	return counter & pmc_bitmask(pmc);
}

static void stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = read_pmc(pmc);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

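/*
 * (Re)create the host perf event backing a guest counter. The sample
 * period is chosen so that the host event reports an overflow exactly
 * when the guest counter would wrap.
 */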
static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
		unsigned config, bool exclude_user, bool exclude_kernel,
		bool intr, bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp)
		attr.config |= HSW_IN_TX_CHECKPOINTED;

	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		printk_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc->vcpu->arch.pmu.reprogram_pmi);
}

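/*
 * Translate an event_select/unit_mask pair to a generic perf hardware
 * event id, but only if the guest's CPUID 0xA bit vector advertises the
 * event as available; otherwise return PERF_COUNT_HW_MAX so the caller
 * falls back to a raw event.
 */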
static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
		u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(arch_events); i++)
		if (arch_events[i].eventsel == event_select
				&& arch_events[i].unit_mask == unit_mask
				&& (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(arch_events))
		return PERF_COUNT_HW_MAX;

	return arch_events[i].event_type;
}

static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
				unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	reprogram_counter(pmc, type, config,
			  !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			  !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			  eventsel & ARCH_PERFMON_EVENTSEL_INT,
			  (eventsel & HSW_IN_TX),
			  (eventsel & HSW_IN_TX_CHECKPOINTED));
}

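/*
 * en_pmi is one 4-bit control field from IA32_FIXED_CTR_CTRL: bits 0-1
 * select the rings to count (OS and/or user), bit 3 enables a PMI on
 * overflow.
 */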
static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
{
	unsigned en = en_pmi & 0x3;
	bool pmi = en_pmi & 0x8;

	stop_counter(pmc);

	if (!en || !pmc_enabled(pmc))
		return;

	reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			  arch_events[fixed_pmc_events[idx]].event_type,
			  !(en & 0x2), /* exclude user */
			  !(en & 0x1), /* exclude kernel */
			  pmi, false, false);
}

static inline u8 fixed_en_pmi(u64 ctrl, int idx)
{
	return (ctrl >> (idx * 4)) & 0xf;
}

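/*
 * Apply a write to IA32_FIXED_CTR_CTRL, reprogramming only the fixed
 * counters whose 4-bit control field actually changed.
 */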
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 en_pmi = fixed_en_pmi(data, i);
		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);

		if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
			continue;

		reprogram_fixed_counter(pmc, en_pmi, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

static void reprogram_idx(struct kvm_pmu *pmu, int idx)
{
	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int fidx = idx - INTEL_PMC_IDX_FIXED;

		reprogram_fixed_counter(pmc,
				fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
	}
}

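/*
 * On a write to IA32_PERF_GLOBAL_CTRL, reprogram only the counters
 * whose enable bit flipped.
 */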
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_idx(pmu, bit);
}

bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
			|| get_fixed_pmc(pmu, msr);
		break;
	}

	return ret;
}

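/*
 * Read a PMU MSR. Counter MSRs are read via read_pmc() so the result
 * includes whatever the backing perf event has counted since the
 * counter value was last saved.
 */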
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
				(pmc = get_fixed_pmc(pmu, index))) {
			*data = read_pmc(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u32 index = msr_info->index;
	u64 data = msr_info->data;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
				(pmc = get_fixed_pmc(pmu, index))) {
			if (!msr_info->host_initiated)
				data = (s64)(s32)data;
			pmc->counter += data - read_pmc(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

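/*
 * Emulate RDPMC: bit 30 of the counter index selects the fixed
 * counters, and bit 31 ("fast" mode) requests only the low 32 bits of
 * the counter value.
 */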
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool fast_mode = pmc & (1u << 31);
	bool fixed = pmc & (1u << 30);
	struct kvm_pmc *counters;
	u64 ctr;

	pmc &= ~(3u << 30);
	if (!fixed && pmc >= pmu->nr_arch_gp_counters)
		return 1;
	if (fixed && pmc >= pmu->nr_arch_fixed_counters)
		return 1;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
	ctr = read_pmc(&counters[pmc]);
	if (fast_mode)
		ctr = (u32)ctr;
	*data = ctr;
	return 0;
}

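/*
 * Derive the vcpu's PMU configuration (version, number and width of
 * counters, available architectural events) from the guest-visible
 * CPUID leaf 0xA, so the guest never sees more PMU than its CPUID
 * advertises.
 */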
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_cpuid_entry2 *entry;
	unsigned bitmap_len;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;

	pmu->version = entry->eax & 0xff;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
			INTEL_PMC_MAX_GENERIC);
	pmu->counter_bitmask[KVM_PMC_GP] =
		((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
	bitmap_len = (entry->eax >> 24) & 0xff;
	pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
				INTEL_PMC_MAX_FIXED);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
	}

	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	memset(pmu, 0, sizeof(*pmu));
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
	init_irq_work(&pmu->irq_work, trigger_pmi);
	kvm_pmu_cpuid_update(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	irq_work_sync(&pmu->irq_work);
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
		stop_counter(&pmu->fixed_counters[i]);

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

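/*
 * Service a KVM_REQ_PMU request: reprogram every counter whose bit is
 * set in reprogram_pmi, dropping bits that no longer map to a counter
 * backed by a perf event.
 */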
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_idx(pmu, bit);
	}
}