turbostat.c 106 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134
  1. /*
  2. * turbostat -- show CPU frequency and C-state residency
  3. * on modern Intel turbo-capable processors.
  4. *
  5. * Copyright (c) 2013 Intel Corporation.
  6. * Len Brown <len.brown@intel.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms and conditions of the GNU General Public License,
  10. * version 2, as published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope it will be useful, but WITHOUT
  13. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  15. * more details.
  16. *
  17. * You should have received a copy of the GNU General Public License along with
  18. * this program; if not, write to the Free Software Foundation, Inc.,
  19. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  20. */
  21. #define _GNU_SOURCE
  22. #include MSRHEADER
  23. #include INTEL_FAMILY_HEADER
  24. #include <stdarg.h>
  25. #include <stdio.h>
  26. #include <err.h>
  27. #include <unistd.h>
  28. #include <sys/types.h>
  29. #include <sys/wait.h>
  30. #include <sys/stat.h>
  31. #include <sys/resource.h>
  32. #include <fcntl.h>
  33. #include <signal.h>
  34. #include <sys/time.h>
  35. #include <stdlib.h>
  36. #include <getopt.h>
  37. #include <dirent.h>
  38. #include <string.h>
  39. #include <ctype.h>
  40. #include <sched.h>
  41. #include <time.h>
  42. #include <cpuid.h>
  43. #include <linux/capability.h>
  44. #include <errno.h>
  45. char *proc_stat = "/proc/stat";
  46. FILE *outf;
  47. int *fd_percpu;
  48. struct timespec interval_ts = {5, 0};
  49. unsigned int debug;
  50. unsigned int rapl_joules;
  51. unsigned int summary_only;
  52. unsigned int dump_only;
  53. unsigned int do_nhm_cstates;
  54. unsigned int do_snb_cstates;
  55. unsigned int do_knl_cstates;
  56. unsigned int do_pc2;
  57. unsigned int do_pc3;
  58. unsigned int do_pc6;
  59. unsigned int do_pc7;
  60. unsigned int do_c8_c9_c10;
  61. unsigned int do_skl_residency;
  62. unsigned int do_slm_cstates;
  63. unsigned int use_c1_residency_msr;
  64. unsigned int has_aperf;
  65. unsigned int has_epb;
  66. unsigned int do_irtl_snb;
  67. unsigned int do_irtl_hsw;
  68. unsigned int units = 1000000; /* MHz etc */
  69. unsigned int genuine_intel;
  70. unsigned int has_invariant_tsc;
  71. unsigned int do_nhm_platform_info;
  72. unsigned int aperf_mperf_multiplier = 1;
  73. int do_irq = 1;
  74. int do_smi;
  75. double bclk;
  76. double base_hz;
  77. unsigned int has_base_hz;
  78. double tsc_tweak = 1.0;
  79. unsigned int show_pkg;
  80. unsigned int show_core;
  81. unsigned int show_cpu;
  82. unsigned int show_pkg_only;
  83. unsigned int show_core_only;
  84. char *output_buffer, *outp;
  85. unsigned int do_rapl;
  86. unsigned int do_dts;
  87. unsigned int do_ptm;
  88. unsigned int do_gfx_rc6_ms;
  89. unsigned long long gfx_cur_rc6_ms;
  90. unsigned int do_gfx_mhz;
  91. unsigned int gfx_cur_mhz;
  92. unsigned int tcc_activation_temp;
  93. unsigned int tcc_activation_temp_override;
  94. double rapl_power_units, rapl_time_units;
  95. double rapl_dram_energy_units, rapl_energy_units;
  96. double rapl_joule_counter_range;
  97. unsigned int do_core_perf_limit_reasons;
  98. unsigned int do_gfx_perf_limit_reasons;
  99. unsigned int do_ring_perf_limit_reasons;
  100. unsigned int crystal_hz;
  101. unsigned long long tsc_hz;
  102. int base_cpu;
  103. double discover_bclk(unsigned int family, unsigned int model);
  104. unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
  105. /* IA32_HWP_REQUEST, IA32_HWP_STATUS */
  106. unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */
  107. unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */
  108. unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
  109. unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
  110. #define RAPL_PKG (1 << 0)
  111. /* 0x610 MSR_PKG_POWER_LIMIT */
  112. /* 0x611 MSR_PKG_ENERGY_STATUS */
  113. #define RAPL_PKG_PERF_STATUS (1 << 1)
  114. /* 0x613 MSR_PKG_PERF_STATUS */
  115. #define RAPL_PKG_POWER_INFO (1 << 2)
  116. /* 0x614 MSR_PKG_POWER_INFO */
  117. #define RAPL_DRAM (1 << 3)
  118. /* 0x618 MSR_DRAM_POWER_LIMIT */
  119. /* 0x619 MSR_DRAM_ENERGY_STATUS */
  120. #define RAPL_DRAM_PERF_STATUS (1 << 4)
  121. /* 0x61b MSR_DRAM_PERF_STATUS */
  122. #define RAPL_DRAM_POWER_INFO (1 << 5)
  123. /* 0x61c MSR_DRAM_POWER_INFO */
  124. #define RAPL_CORES_POWER_LIMIT (1 << 6)
  125. /* 0x638 MSR_PP0_POWER_LIMIT */
  126. #define RAPL_CORE_POLICY (1 << 7)
  127. /* 0x63a MSR_PP0_POLICY */
  128. #define RAPL_GFX (1 << 8)
  129. /* 0x640 MSR_PP1_POWER_LIMIT */
  130. /* 0x641 MSR_PP1_ENERGY_STATUS */
  131. /* 0x642 MSR_PP1_POLICY */
  132. #define RAPL_CORES_ENERGY_STATUS (1 << 9)
  133. /* 0x639 MSR_PP0_ENERGY_STATUS */
  134. #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
  135. #define TJMAX_DEFAULT 100
  136. #define MAX(a, b) ((a) > (b) ? (a) : (b))
  137. /*
  138. * buffer size used by sscanf() for added column names
  139. * Usually truncated to 7 characters, but also handles 18 columns for raw 64-bit counters
  140. */
  141. #define NAME_BYTES 20
  142. int backwards_count;
  143. char *progname;
  144. cpu_set_t *cpu_present_set, *cpu_affinity_set;
  145. size_t cpu_present_setsize, cpu_affinity_setsize;
  146. struct thread_data {
  147. unsigned long long tsc;
  148. unsigned long long aperf;
  149. unsigned long long mperf;
  150. unsigned long long c1;
  151. unsigned int irq_count;
  152. unsigned int smi_count;
  153. unsigned int cpu_id;
  154. unsigned int flags;
  155. #define CPU_IS_FIRST_THREAD_IN_CORE 0x2
  156. #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
  157. unsigned long long counter[1];
  158. } *thread_even, *thread_odd;
  159. struct core_data {
  160. unsigned long long c3;
  161. unsigned long long c6;
  162. unsigned long long c7;
  163. unsigned int core_temp_c;
  164. unsigned int core_id;
  165. unsigned long long counter[1];
  166. } *core_even, *core_odd;
  167. struct pkg_data {
  168. unsigned long long pc2;
  169. unsigned long long pc3;
  170. unsigned long long pc6;
  171. unsigned long long pc7;
  172. unsigned long long pc8;
  173. unsigned long long pc9;
  174. unsigned long long pc10;
  175. unsigned long long pkg_wtd_core_c0;
  176. unsigned long long pkg_any_core_c0;
  177. unsigned long long pkg_any_gfxe_c0;
  178. unsigned long long pkg_both_core_gfxe_c0;
  179. long long gfx_rc6_ms;
  180. unsigned int gfx_mhz;
  181. unsigned int package_id;
  182. unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
  183. unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */
  184. unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */
  185. unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */
  186. unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
  187. unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
  188. unsigned int pkg_temp_c;
  189. unsigned long long counter[1];
  190. } *package_even, *package_odd;
  191. #define ODD_COUNTERS thread_odd, core_odd, package_odd
  192. #define EVEN_COUNTERS thread_even, core_even, package_even
  193. #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
  194. (thread_base + (pkg_no) * topo.num_cores_per_pkg * \
  195. topo.num_threads_per_core + \
  196. (core_no) * topo.num_threads_per_core + (thread_no))
  197. #define GET_CORE(core_base, core_no, pkg_no) \
  198. (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
  199. #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
  200. enum counter_scope {SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE};
  201. enum counter_type {COUNTER_CYCLES, COUNTER_SECONDS};
  202. enum counter_format {FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT};
  203. struct msr_counter {
  204. unsigned int msr_num;
  205. char name[NAME_BYTES];
  206. unsigned int width;
  207. enum counter_type type;
  208. enum counter_format format;
  209. struct msr_counter *next;
  210. };
  211. struct sys_counters {
  212. unsigned int thread_counter_bytes;
  213. unsigned int core_counter_bytes;
  214. unsigned int package_counter_bytes;
  215. struct msr_counter *tp;
  216. struct msr_counter *cp;
  217. struct msr_counter *pp;
  218. } sys;
  219. struct system_summary {
  220. struct thread_data threads;
  221. struct core_data cores;
  222. struct pkg_data packages;
  223. } average;
  224. struct topo_params {
  225. int num_packages;
  226. int num_cpus;
  227. int num_cores;
  228. int max_cpu_num;
  229. int num_cores_per_pkg;
  230. int num_threads_per_core;
  231. } topo;
  232. struct timeval tv_even, tv_odd, tv_delta;
  233. int *irq_column_2_cpu; /* /proc/interrupts column numbers */
  234. int *irqs_per_cpu; /* indexed by cpu_num */
  235. void setup_all_buffers(void);
  236. int cpu_is_not_present(int cpu)
  237. {
  238. return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
  239. }
  240. /*
  241. * run func(thread, core, package) in topology order
  242. * skip non-present cpus
  243. */
  244. int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
  245. struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
  246. {
  247. int retval, pkg_no, core_no, thread_no;
  248. for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
  249. for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
  250. for (thread_no = 0; thread_no <
  251. topo.num_threads_per_core; ++thread_no) {
  252. struct thread_data *t;
  253. struct core_data *c;
  254. struct pkg_data *p;
  255. t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
  256. if (cpu_is_not_present(t->cpu_id))
  257. continue;
  258. c = GET_CORE(core_base, core_no, pkg_no);
  259. p = GET_PKG(pkg_base, pkg_no);
  260. retval = func(t, c, p);
  261. if (retval)
  262. return retval;
  263. }
  264. }
  265. }
  266. return 0;
  267. }
  268. int cpu_migrate(int cpu)
  269. {
  270. CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
  271. CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
  272. if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
  273. return -1;
  274. else
  275. return 0;
  276. }
  277. int get_msr_fd(int cpu)
  278. {
  279. char pathname[32];
  280. int fd;
  281. fd = fd_percpu[cpu];
  282. if (fd)
  283. return fd;
  284. sprintf(pathname, "/dev/cpu/%d/msr", cpu);
  285. fd = open(pathname, O_RDONLY);
  286. if (fd < 0)
  287. err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
  288. fd_percpu[cpu] = fd;
  289. return fd;
  290. }
  291. int get_msr(int cpu, off_t offset, unsigned long long *msr)
  292. {
  293. ssize_t retval;
  294. retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset);
  295. if (retval != sizeof *msr)
  296. err(-1, "msr %d offset 0x%llx read failed", cpu, (unsigned long long)offset);
  297. return 0;
  298. }
  299. /*
  300. * Example Format w/ field column widths:
  301. *
  302. * Package Core CPU Avg_MHz Bzy_MHz TSC_MHz IRQ SMI Busy% CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 ThreadC CoreTmp CoreCnt PkgTmp GFXMHz Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt PkgCnt
  303. * 12345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678
  304. */
  305. void print_header(void)
  306. {
  307. struct msr_counter *mp;
  308. if (show_pkg)
  309. outp += sprintf(outp, "\tPackage");
  310. if (show_core)
  311. outp += sprintf(outp, "\tCore");
  312. if (show_cpu)
  313. outp += sprintf(outp, "\tCPU");
  314. if (has_aperf)
  315. outp += sprintf(outp, "\tAvg_MHz");
  316. if (has_aperf)
  317. outp += sprintf(outp, "\tBusy%%");
  318. if (has_aperf)
  319. outp += sprintf(outp, "\tBzy_MHz");
  320. outp += sprintf(outp, "\tTSC_MHz");
  321. if (!debug)
  322. goto done;
  323. if (do_irq)
  324. outp += sprintf(outp, "\tIRQ");
  325. if (do_smi)
  326. outp += sprintf(outp, "\tSMI");
  327. if (do_nhm_cstates)
  328. outp += sprintf(outp, "\tCPU%%c1");
  329. if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
  330. outp += sprintf(outp, "\tCPU%%c3");
  331. if (do_nhm_cstates)
  332. outp += sprintf(outp, "\tCPU%%c6");
  333. if (do_snb_cstates)
  334. outp += sprintf(outp, "\tCPU%%c7");
  335. for (mp = sys.tp; mp; mp = mp->next) {
  336. if (mp->format == FORMAT_RAW) {
  337. if (mp->width == 64)
  338. outp += sprintf(outp, "\t%18.18s", mp->name);
  339. else
  340. outp += sprintf(outp, "\t%10.10s", mp->name);
  341. } else {
  342. outp += sprintf(outp, "\t%-7.7s", mp->name);
  343. }
  344. }
  345. if (do_dts)
  346. outp += sprintf(outp, "\tCoreTmp");
  347. for (mp = sys.cp; mp; mp = mp->next) {
  348. if (mp->format == FORMAT_RAW) {
  349. if (mp->width == 64)
  350. outp += sprintf(outp, "\t%18.18s", mp->name);
  351. else
  352. outp += sprintf(outp, "\t%10.10s", mp->name);
  353. } else {
  354. outp += sprintf(outp, "\t%-7.7s", mp->name);
  355. }
  356. }
  357. if (do_ptm)
  358. outp += sprintf(outp, "\tPkgTmp");
  359. if (do_gfx_rc6_ms)
  360. outp += sprintf(outp, "\tGFX%%rc6");
  361. if (do_gfx_mhz)
  362. outp += sprintf(outp, "\tGFXMHz");
  363. if (do_skl_residency) {
  364. outp += sprintf(outp, "\tTotl%%C0");
  365. outp += sprintf(outp, "\tAny%%C0");
  366. outp += sprintf(outp, "\tGFX%%C0");
  367. outp += sprintf(outp, "\tCPUGFX%%");
  368. }
  369. if (do_pc2)
  370. outp += sprintf(outp, "\tPkg%%pc2");
  371. if (do_pc3)
  372. outp += sprintf(outp, "\tPkg%%pc3");
  373. if (do_pc6)
  374. outp += sprintf(outp, "\tPkg%%pc6");
  375. if (do_pc7)
  376. outp += sprintf(outp, "\tPkg%%pc7");
  377. if (do_c8_c9_c10) {
  378. outp += sprintf(outp, "\tPkg%%pc8");
  379. outp += sprintf(outp, "\tPkg%%pc9");
  380. outp += sprintf(outp, "\tPk%%pc10");
  381. }
  382. if (do_rapl && !rapl_joules) {
  383. if (do_rapl & RAPL_PKG)
  384. outp += sprintf(outp, "\tPkgWatt");
  385. if (do_rapl & RAPL_CORES_ENERGY_STATUS)
  386. outp += sprintf(outp, "\tCorWatt");
  387. if (do_rapl & RAPL_GFX)
  388. outp += sprintf(outp, "\tGFXWatt");
  389. if (do_rapl & RAPL_DRAM)
  390. outp += sprintf(outp, "\tRAMWatt");
  391. if (do_rapl & RAPL_PKG_PERF_STATUS)
  392. outp += sprintf(outp, "\tPKG_%%");
  393. if (do_rapl & RAPL_DRAM_PERF_STATUS)
  394. outp += sprintf(outp, "\tRAM_%%");
  395. } else if (do_rapl && rapl_joules) {
  396. if (do_rapl & RAPL_PKG)
  397. outp += sprintf(outp, "\tPkg_J");
  398. if (do_rapl & RAPL_CORES_ENERGY_STATUS)
  399. outp += sprintf(outp, "\tCor_J");
  400. if (do_rapl & RAPL_GFX)
  401. outp += sprintf(outp, "\tGFX_J");
  402. if (do_rapl & RAPL_DRAM)
  403. outp += sprintf(outp, "\tRAM_J");
  404. if (do_rapl & RAPL_PKG_PERF_STATUS)
  405. outp += sprintf(outp, "\tPKG_%%");
  406. if (do_rapl & RAPL_DRAM_PERF_STATUS)
  407. outp += sprintf(outp, "\tRAM_%%");
  408. }
  409. for (mp = sys.pp; mp; mp = mp->next) {
  410. if (mp->format == FORMAT_RAW) {
  411. if (mp->width == 64)
  412. outp += sprintf(outp, "\t%18.18s", mp->name);
  413. else
  414. outp += sprintf(outp, "\t%10.10s", mp->name);
  415. } else {
  416. outp += sprintf(outp, "\t%-7.7s", mp->name);
  417. }
  418. }
  419. done:
  420. outp += sprintf(outp, "\n");
  421. }
  422. int dump_counters(struct thread_data *t, struct core_data *c,
  423. struct pkg_data *p)
  424. {
  425. int i;
  426. struct msr_counter *mp;
  427. outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
  428. if (t) {
  429. outp += sprintf(outp, "CPU: %d flags 0x%x\n",
  430. t->cpu_id, t->flags);
  431. outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
  432. outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
  433. outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
  434. outp += sprintf(outp, "c1: %016llX\n", t->c1);
  435. if (do_irq)
  436. outp += sprintf(outp, "IRQ: %08X\n", t->irq_count);
  437. if (do_smi)
  438. outp += sprintf(outp, "SMI: %08X\n", t->smi_count);
  439. for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
  440. outp += sprintf(outp, "tADDED [%d] msr0x%x: %08llX\n",
  441. i, mp->msr_num, t->counter[i]);
  442. }
  443. }
  444. if (c) {
  445. outp += sprintf(outp, "core: %d\n", c->core_id);
  446. outp += sprintf(outp, "c3: %016llX\n", c->c3);
  447. outp += sprintf(outp, "c6: %016llX\n", c->c6);
  448. outp += sprintf(outp, "c7: %016llX\n", c->c7);
  449. outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
  450. for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
  451. outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
  452. i, mp->msr_num, c->counter[i]);
  453. }
  454. }
  455. if (p) {
  456. outp += sprintf(outp, "package: %d\n", p->package_id);
  457. outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
  458. outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
  459. outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
  460. outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
  461. outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
  462. if (do_pc3)
  463. outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
  464. if (do_pc6)
  465. outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
  466. if (do_pc7)
  467. outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
  468. outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
  469. outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
  470. outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
  471. outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
  472. outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
  473. outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
  474. outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
  475. outp += sprintf(outp, "Throttle PKG: %0X\n",
  476. p->rapl_pkg_perf_status);
  477. outp += sprintf(outp, "Throttle RAM: %0X\n",
  478. p->rapl_dram_perf_status);
  479. outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
  480. for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
  481. outp += sprintf(outp, "pADDED [%d] msr0x%x: %08llX\n",
  482. i, mp->msr_num, p->counter[i]);
  483. }
  484. }
  485. outp += sprintf(outp, "\n");
  486. return 0;
  487. }
  488. /*
  489. * column formatting convention & formats
  490. */
  491. int format_counters(struct thread_data *t, struct core_data *c,
  492. struct pkg_data *p)
  493. {
  494. double interval_float;
  495. char *fmt8;
  496. int i;
  497. struct msr_counter *mp;
  498. /* if showing only 1st thread in core and this isn't one, bail out */
  499. if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
  500. return 0;
  501. /* if showing only 1st thread in pkg and this isn't one, bail out */
  502. if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  503. return 0;
  504. interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
  505. /* topo columns, print blanks on 1st (average) line */
  506. if (t == &average.threads) {
  507. if (show_pkg)
  508. outp += sprintf(outp, "\t-");
  509. if (show_core)
  510. outp += sprintf(outp, "\t-");
  511. if (show_cpu)
  512. outp += sprintf(outp, "\t-");
  513. } else {
  514. if (show_pkg) {
  515. if (p)
  516. outp += sprintf(outp, "\t%d", p->package_id);
  517. else
  518. outp += sprintf(outp, "\t-");
  519. }
  520. if (show_core) {
  521. if (c)
  522. outp += sprintf(outp, "\t%d", c->core_id);
  523. else
  524. outp += sprintf(outp, "\t-");
  525. }
  526. if (show_cpu)
  527. outp += sprintf(outp, "\t%d", t->cpu_id);
  528. }
  529. /* Avg_MHz */
  530. if (has_aperf)
  531. outp += sprintf(outp, "\t%.0f",
  532. 1.0 / units * t->aperf / interval_float);
  533. /* Busy% */
  534. if (has_aperf)
  535. outp += sprintf(outp, "\t%.2f", 100.0 * t->mperf/t->tsc/tsc_tweak);
  536. /* Bzy_MHz */
  537. if (has_aperf) {
  538. if (has_base_hz)
  539. outp += sprintf(outp, "\t%.0f", base_hz / units * t->aperf / t->mperf);
  540. else
  541. outp += sprintf(outp, "\t%.0f",
  542. 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
  543. }
  544. /* TSC_MHz */
  545. outp += sprintf(outp, "\t%.0f", 1.0 * t->tsc/units/interval_float);
  546. if (!debug)
  547. goto done;
  548. /* IRQ */
  549. if (do_irq)
  550. outp += sprintf(outp, "\t%d", t->irq_count);
  551. /* SMI */
  552. if (do_smi)
  553. outp += sprintf(outp, "\t%d", t->smi_count);
  554. if (do_nhm_cstates)
  555. outp += sprintf(outp, "\t%.2f", 100.0 * t->c1/t->tsc);
  556. /* print per-core data only for 1st thread in core */
  557. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
  558. goto done;
  559. if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
  560. outp += sprintf(outp, "\t%.2f", 100.0 * c->c3/t->tsc);
  561. if (do_nhm_cstates)
  562. outp += sprintf(outp, "\t%.2f", 100.0 * c->c6/t->tsc);
  563. if (do_snb_cstates)
  564. outp += sprintf(outp, "\t%.2f", 100.0 * c->c7/t->tsc);
  565. for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
  566. if (mp->format == FORMAT_RAW) {
  567. if (mp->width == 32)
  568. outp += sprintf(outp, "\t0x%08lx", (unsigned long) t->counter[i]);
  569. else
  570. outp += sprintf(outp, "\t0x%016llx", t->counter[i]);
  571. } else if (mp->format == FORMAT_DELTA) {
  572. outp += sprintf(outp, "\t%8lld", t->counter[i]);
  573. } else if (mp->format == FORMAT_PERCENT) {
  574. outp += sprintf(outp, "\t%.2f", 100.0 * t->counter[i]/t->tsc);
  575. }
  576. }
  577. if (do_dts)
  578. outp += sprintf(outp, "\t%d", c->core_temp_c);
  579. for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
  580. if (mp->format == FORMAT_RAW) {
  581. if (mp->width == 32)
  582. outp += sprintf(outp, "\t0x%08lx", (unsigned long) c->counter[i]);
  583. else
  584. outp += sprintf(outp, "\t0x%016llx", c->counter[i]);
  585. } else if (mp->format == FORMAT_DELTA) {
  586. outp += sprintf(outp, "\t%8lld", c->counter[i]);
  587. } else if (mp->format == FORMAT_PERCENT) {
  588. outp += sprintf(outp, "\t%.2f", 100.0 * c->counter[i]/t->tsc);
  589. }
  590. }
  591. /* print per-package data only for 1st core in package */
  592. if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  593. goto done;
  594. /* PkgTmp */
  595. if (do_ptm)
  596. outp += sprintf(outp, "\t%d", p->pkg_temp_c);
  597. /* GFXrc6 */
  598. if (do_gfx_rc6_ms) {
  599. if (p->gfx_rc6_ms == -1) { /* detect GFX counter reset */
  600. outp += sprintf(outp, "\t**.**");
  601. } else {
  602. outp += sprintf(outp, "\t%.2f",
  603. p->gfx_rc6_ms / 10.0 / interval_float);
  604. }
  605. }
  606. /* GFXMHz */
  607. if (do_gfx_mhz)
  608. outp += sprintf(outp, "\t%d", p->gfx_mhz);
  609. /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
  610. if (do_skl_residency) {
  611. outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc);
  612. outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_any_core_c0/t->tsc);
  613. outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc);
  614. outp += sprintf(outp, "\t%.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc);
  615. }
  616. if (do_pc2)
  617. outp += sprintf(outp, "\t%.2f", 100.0 * p->pc2/t->tsc);
  618. if (do_pc3)
  619. outp += sprintf(outp, "\t%.2f", 100.0 * p->pc3/t->tsc);
  620. if (do_pc6)
  621. outp += sprintf(outp, "\t%.2f", 100.0 * p->pc6/t->tsc);
  622. if (do_pc7)
  623. outp += sprintf(outp, "\t%.2f", 100.0 * p->pc7/t->tsc);
  624. if (do_c8_c9_c10) {
  625. outp += sprintf(outp, "\t%.2f", 100.0 * p->pc8/t->tsc);
  626. outp += sprintf(outp, "\t%.2f", 100.0 * p->pc9/t->tsc);
  627. outp += sprintf(outp, "\t%.2f", 100.0 * p->pc10/t->tsc);
  628. }
  629. /*
  630. * If measurement interval exceeds minimum RAPL Joule Counter range,
  631. * indicate that results are suspect by printing "**" in fraction place.
  632. */
  633. if (interval_float < rapl_joule_counter_range)
  634. fmt8 = "\t%.2f";
  635. else
  636. fmt8 = "%6.0f**";
  637. if (do_rapl && !rapl_joules) {
  638. if (do_rapl & RAPL_PKG)
  639. outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float);
  640. if (do_rapl & RAPL_CORES_ENERGY_STATUS)
  641. outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float);
  642. if (do_rapl & RAPL_GFX)
  643. outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float);
  644. if (do_rapl & RAPL_DRAM)
  645. outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float);
  646. if (do_rapl & RAPL_PKG_PERF_STATUS)
  647. outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
  648. if (do_rapl & RAPL_DRAM_PERF_STATUS)
  649. outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
  650. } else if (do_rapl && rapl_joules) {
  651. if (do_rapl & RAPL_PKG)
  652. outp += sprintf(outp, fmt8,
  653. p->energy_pkg * rapl_energy_units);
  654. if (do_rapl & RAPL_CORES)
  655. outp += sprintf(outp, fmt8,
  656. p->energy_cores * rapl_energy_units);
  657. if (do_rapl & RAPL_GFX)
  658. outp += sprintf(outp, fmt8,
  659. p->energy_gfx * rapl_energy_units);
  660. if (do_rapl & RAPL_DRAM)
  661. outp += sprintf(outp, fmt8,
  662. p->energy_dram * rapl_dram_energy_units);
  663. if (do_rapl & RAPL_PKG_PERF_STATUS)
  664. outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
  665. if (do_rapl & RAPL_DRAM_PERF_STATUS)
  666. outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
  667. }
  668. for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
  669. if (mp->format == FORMAT_RAW) {
  670. if (mp->width == 32)
  671. outp += sprintf(outp, "\t0x%08lx", (unsigned long) p->counter[i]);
  672. else
  673. outp += sprintf(outp, "\t0x%016llx", p->counter[i]);
  674. } else if (mp->format == FORMAT_DELTA) {
  675. outp += sprintf(outp, "\t%8lld", p->counter[i]);
  676. } else if (mp->format == FORMAT_PERCENT) {
  677. outp += sprintf(outp, "\t%.2f", 100.0 * p->counter[i]/t->tsc);
  678. }
  679. }
  680. done:
  681. outp += sprintf(outp, "\n");
  682. return 0;
  683. }
  684. void flush_output_stdout(void)
  685. {
  686. FILE *filep;
  687. if (outf == stderr)
  688. filep = stdout;
  689. else
  690. filep = outf;
  691. fputs(output_buffer, filep);
  692. fflush(filep);
  693. outp = output_buffer;
  694. }
  695. void flush_output_stderr(void)
  696. {
  697. fputs(output_buffer, outf);
  698. fflush(outf);
  699. outp = output_buffer;
  700. }
  701. void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  702. {
  703. static int printed;
  704. if (!printed || !summary_only)
  705. print_header();
  706. if (topo.num_cpus > 1)
  707. format_counters(&average.threads, &average.cores,
  708. &average.packages);
  709. printed = 1;
  710. if (summary_only)
  711. return;
  712. for_all_cpus(format_counters, t, c, p);
  713. }
  714. #define DELTA_WRAP32(new, old) \
  715. if (new > old) { \
  716. old = new - old; \
  717. } else { \
  718. old = 0x100000000 + new - old; \
  719. }
  720. int
  721. delta_package(struct pkg_data *new, struct pkg_data *old)
  722. {
  723. int i;
  724. struct msr_counter *mp;
  725. if (do_skl_residency) {
  726. old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
  727. old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
  728. old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
  729. old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
  730. }
  731. old->pc2 = new->pc2 - old->pc2;
  732. if (do_pc3)
  733. old->pc3 = new->pc3 - old->pc3;
  734. if (do_pc6)
  735. old->pc6 = new->pc6 - old->pc6;
  736. if (do_pc7)
  737. old->pc7 = new->pc7 - old->pc7;
  738. old->pc8 = new->pc8 - old->pc8;
  739. old->pc9 = new->pc9 - old->pc9;
  740. old->pc10 = new->pc10 - old->pc10;
  741. old->pkg_temp_c = new->pkg_temp_c;
  742. /* flag an error when rc6 counter resets/wraps */
  743. if (old->gfx_rc6_ms > new->gfx_rc6_ms)
  744. old->gfx_rc6_ms = -1;
  745. else
  746. old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
  747. old->gfx_mhz = new->gfx_mhz;
  748. DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
  749. DELTA_WRAP32(new->energy_cores, old->energy_cores);
  750. DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
  751. DELTA_WRAP32(new->energy_dram, old->energy_dram);
  752. DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
  753. DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
  754. for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
  755. if (mp->format == FORMAT_RAW)
  756. old->counter[i] = new->counter[i];
  757. else
  758. old->counter[i] = new->counter[i] - old->counter[i];
  759. }
  760. return 0;
  761. }
  762. void
  763. delta_core(struct core_data *new, struct core_data *old)
  764. {
  765. int i;
  766. struct msr_counter *mp;
  767. old->c3 = new->c3 - old->c3;
  768. old->c6 = new->c6 - old->c6;
  769. old->c7 = new->c7 - old->c7;
  770. old->core_temp_c = new->core_temp_c;
  771. for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
  772. if (mp->format == FORMAT_RAW)
  773. old->counter[i] = new->counter[i];
  774. else
  775. old->counter[i] = new->counter[i] - old->counter[i];
  776. }
  777. }
  778. /*
  779. * old = new - old
  780. */
  781. int
  782. delta_thread(struct thread_data *new, struct thread_data *old,
  783. struct core_data *core_delta)
  784. {
  785. int i;
  786. struct msr_counter *mp;
  787. old->tsc = new->tsc - old->tsc;
  788. /* check for TSC < 1 Mcycles over interval */
  789. if (old->tsc < (1000 * 1000))
  790. errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
  791. "You can disable all c-states by booting with \"idle=poll\"\n"
  792. "or just the deep ones with \"processor.max_cstate=1\"");
  793. old->c1 = new->c1 - old->c1;
  794. if (has_aperf) {
  795. if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
  796. old->aperf = new->aperf - old->aperf;
  797. old->mperf = new->mperf - old->mperf;
  798. } else {
  799. return -1;
  800. }
  801. }
  802. if (use_c1_residency_msr) {
  803. /*
  804. * Some models have a dedicated C1 residency MSR,
  805. * which should be more accurate than the derivation below.
  806. */
  807. } else {
  808. /*
  809. * As counter collection is not atomic,
  810. * it is possible for mperf's non-halted cycles + idle states
  811. * to exceed TSC's all cycles: show c1 = 0% in that case.
  812. */
  813. if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
  814. old->c1 = 0;
  815. else {
  816. /* normal case, derive c1 */
  817. old->c1 = old->tsc - old->mperf - core_delta->c3
  818. - core_delta->c6 - core_delta->c7;
  819. }
  820. }
  821. if (old->mperf == 0) {
  822. if (debug > 1)
  823. fprintf(outf, "cpu%d MPERF 0!\n", old->cpu_id);
  824. old->mperf = 1; /* divide by 0 protection */
  825. }
  826. if (do_irq)
  827. old->irq_count = new->irq_count - old->irq_count;
  828. if (do_smi)
  829. old->smi_count = new->smi_count - old->smi_count;
  830. for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
  831. if (mp->format == FORMAT_RAW)
  832. old->counter[i] = new->counter[i];
  833. else
  834. old->counter[i] = new->counter[i] - old->counter[i];
  835. }
  836. return 0;
  837. }
  838. int delta_cpu(struct thread_data *t, struct core_data *c,
  839. struct pkg_data *p, struct thread_data *t2,
  840. struct core_data *c2, struct pkg_data *p2)
  841. {
  842. int retval = 0;
  843. /* calculate core delta only for 1st thread in core */
  844. if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
  845. delta_core(c, c2);
  846. /* always calculate thread delta */
  847. retval = delta_thread(t, t2, c2); /* c2 is core delta */
  848. if (retval)
  849. return retval;
  850. /* calculate package delta only for 1st core in package */
  851. if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
  852. retval = delta_package(p, p2);
  853. return retval;
  854. }
  855. void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  856. {
  857. int i;
  858. struct msr_counter *mp;
  859. t->tsc = 0;
  860. t->aperf = 0;
  861. t->mperf = 0;
  862. t->c1 = 0;
  863. t->irq_count = 0;
  864. t->smi_count = 0;
  865. /* tells format_counters to dump all fields from this set */
  866. t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
  867. c->c3 = 0;
  868. c->c6 = 0;
  869. c->c7 = 0;
  870. c->core_temp_c = 0;
  871. p->pkg_wtd_core_c0 = 0;
  872. p->pkg_any_core_c0 = 0;
  873. p->pkg_any_gfxe_c0 = 0;
  874. p->pkg_both_core_gfxe_c0 = 0;
  875. p->pc2 = 0;
  876. if (do_pc3)
  877. p->pc3 = 0;
  878. if (do_pc6)
  879. p->pc6 = 0;
  880. if (do_pc7)
  881. p->pc7 = 0;
  882. p->pc8 = 0;
  883. p->pc9 = 0;
  884. p->pc10 = 0;
  885. p->energy_pkg = 0;
  886. p->energy_dram = 0;
  887. p->energy_cores = 0;
  888. p->energy_gfx = 0;
  889. p->rapl_pkg_perf_status = 0;
  890. p->rapl_dram_perf_status = 0;
  891. p->pkg_temp_c = 0;
  892. p->gfx_rc6_ms = 0;
  893. p->gfx_mhz = 0;
  894. for (i = 0, mp = sys.tp; mp; i++, mp = mp->next)
  895. t->counter[i] = 0;
  896. for (i = 0, mp = sys.cp; mp; i++, mp = mp->next)
  897. c->counter[i] = 0;
  898. for (i = 0, mp = sys.pp; mp; i++, mp = mp->next)
  899. p->counter[i] = 0;
  900. }
  901. int sum_counters(struct thread_data *t, struct core_data *c,
  902. struct pkg_data *p)
  903. {
  904. int i;
  905. struct msr_counter *mp;
  906. average.threads.tsc += t->tsc;
  907. average.threads.aperf += t->aperf;
  908. average.threads.mperf += t->mperf;
  909. average.threads.c1 += t->c1;
  910. average.threads.irq_count += t->irq_count;
  911. average.threads.smi_count += t->smi_count;
  912. for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
  913. if (mp->format == FORMAT_RAW)
  914. continue;
  915. average.threads.counter[i] += t->counter[i];
  916. }
  917. /* sum per-core values only for 1st thread in core */
  918. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
  919. return 0;
  920. average.cores.c3 += c->c3;
  921. average.cores.c6 += c->c6;
  922. average.cores.c7 += c->c7;
  923. average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
  924. for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
  925. if (mp->format == FORMAT_RAW)
  926. continue;
  927. average.cores.counter[i] += c->counter[i];
  928. }
  929. /* sum per-pkg values only for 1st core in pkg */
  930. if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  931. return 0;
  932. if (do_skl_residency) {
  933. average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
  934. average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
  935. average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
  936. average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
  937. }
  938. average.packages.pc2 += p->pc2;
  939. if (do_pc3)
  940. average.packages.pc3 += p->pc3;
  941. if (do_pc6)
  942. average.packages.pc6 += p->pc6;
  943. if (do_pc7)
  944. average.packages.pc7 += p->pc7;
  945. average.packages.pc8 += p->pc8;
  946. average.packages.pc9 += p->pc9;
  947. average.packages.pc10 += p->pc10;
  948. average.packages.energy_pkg += p->energy_pkg;
  949. average.packages.energy_dram += p->energy_dram;
  950. average.packages.energy_cores += p->energy_cores;
  951. average.packages.energy_gfx += p->energy_gfx;
  952. average.packages.gfx_rc6_ms = p->gfx_rc6_ms;
  953. average.packages.gfx_mhz = p->gfx_mhz;
  954. average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
  955. average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
  956. average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
  957. for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
  958. if (mp->format == FORMAT_RAW)
  959. continue;
  960. average.packages.counter[i] += p->counter[i];
  961. }
  962. return 0;
  963. }
  964. /*
  965. * sum the counters for all cpus in the system
  966. * compute the weighted average
  967. */
  968. void compute_average(struct thread_data *t, struct core_data *c,
  969. struct pkg_data *p)
  970. {
  971. int i;
  972. struct msr_counter *mp;
  973. clear_counters(&average.threads, &average.cores, &average.packages);
  974. for_all_cpus(sum_counters, t, c, p);
  975. average.threads.tsc /= topo.num_cpus;
  976. average.threads.aperf /= topo.num_cpus;
  977. average.threads.mperf /= topo.num_cpus;
  978. average.threads.c1 /= topo.num_cpus;
  979. average.cores.c3 /= topo.num_cores;
  980. average.cores.c6 /= topo.num_cores;
  981. average.cores.c7 /= topo.num_cores;
  982. if (do_skl_residency) {
  983. average.packages.pkg_wtd_core_c0 /= topo.num_packages;
  984. average.packages.pkg_any_core_c0 /= topo.num_packages;
  985. average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
  986. average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
  987. }
  988. average.packages.pc2 /= topo.num_packages;
  989. if (do_pc3)
  990. average.packages.pc3 /= topo.num_packages;
  991. if (do_pc6)
  992. average.packages.pc6 /= topo.num_packages;
  993. if (do_pc7)
  994. average.packages.pc7 /= topo.num_packages;
  995. average.packages.pc8 /= topo.num_packages;
  996. average.packages.pc9 /= topo.num_packages;
  997. average.packages.pc10 /= topo.num_packages;
  998. for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
  999. if (mp->format == FORMAT_RAW)
  1000. continue;
  1001. average.threads.counter[i] /= topo.num_cpus;
  1002. }
  1003. for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
  1004. if (mp->format == FORMAT_RAW)
  1005. continue;
  1006. average.cores.counter[i] /= topo.num_cores;
  1007. }
  1008. for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
  1009. if (mp->format == FORMAT_RAW)
  1010. continue;
  1011. average.packages.counter[i] /= topo.num_packages;
  1012. }
  1013. }
  1014. static unsigned long long rdtsc(void)
  1015. {
  1016. unsigned int low, high;
  1017. asm volatile("rdtsc" : "=a" (low), "=d" (high));
  1018. return low | ((unsigned long long)high) << 32;
  1019. }
  1020. /*
  1021. * get_counters(...)
  1022. * migrate to cpu
  1023. * acquire and record local counters for that cpu
  1024. */
  1025. int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  1026. {
  1027. int cpu = t->cpu_id;
  1028. unsigned long long msr;
  1029. int aperf_mperf_retry_count = 0;
  1030. struct msr_counter *mp;
  1031. int i;
  1032. if (cpu_migrate(cpu)) {
  1033. fprintf(outf, "Could not migrate to CPU %d\n", cpu);
  1034. return -1;
  1035. }
  1036. retry:
  1037. t->tsc = rdtsc(); /* we are running on local CPU of interest */
  1038. if (has_aperf) {
  1039. unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
  1040. /*
  1041. * The TSC, APERF and MPERF must be read together for
  1042. * APERF/MPERF and MPERF/TSC to give accurate results.
  1043. *
  1044. * Unfortunately, APERF and MPERF are read by
  1045. * individual system call, so delays may occur
  1046. * between them. If the time to read them
  1047. * varies by a large amount, we re-read them.
  1048. */
  1049. /*
  1050. * This initial dummy APERF read has been seen to
  1051. * reduce jitter in the subsequent reads.
  1052. */
  1053. if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
  1054. return -3;
  1055. t->tsc = rdtsc(); /* re-read close to APERF */
  1056. tsc_before = t->tsc;
  1057. if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
  1058. return -3;
  1059. tsc_between = rdtsc();
  1060. if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
  1061. return -4;
  1062. tsc_after = rdtsc();
  1063. aperf_time = tsc_between - tsc_before;
  1064. mperf_time = tsc_after - tsc_between;
  1065. /*
  1066. * If the system call latency to read APERF and MPERF
  1067. * differ by more than 2x, then try again.
  1068. */
  1069. if ((aperf_time > (2 * mperf_time)) || (mperf_time > (2 * aperf_time))) {
  1070. aperf_mperf_retry_count++;
  1071. if (aperf_mperf_retry_count < 5)
  1072. goto retry;
  1073. else
  1074. warnx("cpu%d jitter %lld %lld",
  1075. cpu, aperf_time, mperf_time);
  1076. }
  1077. aperf_mperf_retry_count = 0;
  1078. t->aperf = t->aperf * aperf_mperf_multiplier;
  1079. t->mperf = t->mperf * aperf_mperf_multiplier;
  1080. }
  1081. if (do_irq)
  1082. t->irq_count = irqs_per_cpu[cpu];
  1083. if (do_smi) {
  1084. if (get_msr(cpu, MSR_SMI_COUNT, &msr))
  1085. return -5;
  1086. t->smi_count = msr & 0xFFFFFFFF;
  1087. }
  1088. if (use_c1_residency_msr) {
  1089. if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
  1090. return -6;
  1091. }
  1092. for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
  1093. if (get_msr(cpu, mp->msr_num, &t->counter[i]))
  1094. return -10;
  1095. }
  1096. /* collect core counters only for 1st thread in core */
  1097. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
  1098. return 0;
  1099. if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) {
  1100. if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
  1101. return -6;
  1102. }
  1103. if (do_nhm_cstates && !do_knl_cstates) {
  1104. if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
  1105. return -7;
  1106. } else if (do_knl_cstates) {
  1107. if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
  1108. return -7;
  1109. }
  1110. if (do_snb_cstates)
  1111. if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
  1112. return -8;
  1113. if (do_dts) {
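/* THERM_STATUS[22:16] is the Digital Readout: degrees C below TCC activation */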
  1114. if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
  1115. return -9;
  1116. c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
  1117. }
  1118. for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
  1119. if (get_msr(cpu, mp->msr_num, &c->counter[i]))
  1120. return -10;
  1121. }
  1122. /* collect package counters only for 1st core in package */
  1123. if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  1124. return 0;
  1125. if (do_skl_residency) {
  1126. if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
  1127. return -10;
  1128. if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
  1129. return -11;
  1130. if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
  1131. return -12;
  1132. if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
  1133. return -13;
  1134. }
  1135. if (do_pc3)
  1136. if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
  1137. return -9;
  1138. if (do_pc6)
  1139. if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
  1140. return -10;
  1141. if (do_pc2)
  1142. if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
  1143. return -11;
  1144. if (do_pc7)
  1145. if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
  1146. return -12;
  1147. if (do_c8_c9_c10) {
  1148. if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
  1149. return -13;
  1150. if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
  1151. return -13;
  1152. if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
  1153. return -13;
  1154. }
  1155. if (do_rapl & RAPL_PKG) {
  1156. if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
  1157. return -13;
  1158. p->energy_pkg = msr & 0xFFFFFFFF;
  1159. }
  1160. if (do_rapl & RAPL_CORES_ENERGY_STATUS) {
  1161. if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
  1162. return -14;
  1163. p->energy_cores = msr & 0xFFFFFFFF;
  1164. }
  1165. if (do_rapl & RAPL_DRAM) {
  1166. if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
  1167. return -15;
  1168. p->energy_dram = msr & 0xFFFFFFFF;
  1169. }
  1170. if (do_rapl & RAPL_GFX) {
  1171. if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
  1172. return -16;
  1173. p->energy_gfx = msr & 0xFFFFFFFF;
  1174. }
  1175. if (do_rapl & RAPL_PKG_PERF_STATUS) {
  1176. if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
  1177. return -16;
  1178. p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
  1179. }
  1180. if (do_rapl & RAPL_DRAM_PERF_STATUS) {
  1181. if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
  1182. return -16;
  1183. p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
  1184. }
  1185. if (do_ptm) {
  1186. if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
  1187. return -17;
  1188. p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
  1189. }
  1190. if (do_gfx_rc6_ms)
  1191. p->gfx_rc6_ms = gfx_cur_rc6_ms;
  1192. if (do_gfx_mhz)
  1193. p->gfx_mhz = gfx_cur_mhz;
  1194. for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
  1195. if (get_msr(cpu, mp->msr_num, &p->counter[i]))
  1196. return -10;
  1197. }
  1198. return 0;
  1199. }
  1200. /*
  1201. * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
  1202. * If you change the values, note they are used both in comparisons
  1203. * (>= PCL__7) and to index pkg_cstate_limit_strings[].
  1204. */
  1205. #define PCLUKN 0 /* Unknown */
  1206. #define PCLRSV 1 /* Reserved */
  1207. #define PCL__0 2 /* PC0 */
  1208. #define PCL__1 3 /* PC1 */
  1209. #define PCL__2 4 /* PC2 */
  1210. #define PCL__3 5 /* PC3 */
  1211. #define PCL__4 6 /* PC4 */
  1212. #define PCL__6 7 /* PC6 */
  1213. #define PCL_6N 8 /* PC6 No Retention */
  1214. #define PCL_6R 9 /* PC6 Retention */
  1215. #define PCL__7 10 /* PC7 */
  1216. #define PCL_7S 11 /* PC7 Shrink */
  1217. #define PCL__8 12 /* PC8 */
  1218. #define PCL__9 13 /* PC9 */
  1219. #define PCLUNL 14 /* Unlimited */
  1220. int pkg_cstate_limit = PCLUKN;
1221. char *pkg_cstate_limit_strings[] = { "unknown", "reserved", "pc0", "pc1", "pc2",
1222. "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"};
  1223. int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
  1224. int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
  1225. int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
  1226. int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
  1227. int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
  1228. int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
  1229. int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
  1230. int skx_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
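/* each *_pkg_cstate_limits[] table above is indexed by MSR_PKG_CST_CONFIG_CONTROL[3:0] in probe_nhm_msrs() */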
  1231. static void
  1232. calculate_tsc_tweak()
  1233. {
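/* base_hz and tsc_hz can differ; tsc_tweak rescales TSC-relative ratios (e.g. %Busy) accordingly */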
  1234. tsc_tweak = base_hz / tsc_hz;
  1235. }
  1236. static void
  1237. dump_nhm_platform_info(void)
  1238. {
  1239. unsigned long long msr;
  1240. unsigned int ratio;
  1241. get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
  1242. fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
  1243. ratio = (msr >> 40) & 0xFF;
  1244. fprintf(outf, "%d * %.0f = %.0f MHz max efficiency frequency\n",
  1245. ratio, bclk, ratio * bclk);
  1246. ratio = (msr >> 8) & 0xFF;
  1247. fprintf(outf, "%d * %.0f = %.0f MHz base frequency\n",
  1248. ratio, bclk, ratio * bclk);
  1249. get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
  1250. fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
  1251. base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
  1252. return;
  1253. }
  1254. static void
  1255. dump_hsw_turbo_ratio_limits(void)
  1256. {
  1257. unsigned long long msr;
  1258. unsigned int ratio;
  1259. get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
  1260. fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr);
  1261. ratio = (msr >> 8) & 0xFF;
  1262. if (ratio)
  1263. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 18 active cores\n",
  1264. ratio, bclk, ratio * bclk);
  1265. ratio = (msr >> 0) & 0xFF;
  1266. if (ratio)
  1267. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 17 active cores\n",
  1268. ratio, bclk, ratio * bclk);
  1269. return;
  1270. }
  1271. static void
  1272. dump_ivt_turbo_ratio_limits(void)
  1273. {
  1274. unsigned long long msr;
  1275. unsigned int ratio;
  1276. get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
  1277. fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr);
  1278. ratio = (msr >> 56) & 0xFF;
  1279. if (ratio)
  1280. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 16 active cores\n",
  1281. ratio, bclk, ratio * bclk);
  1282. ratio = (msr >> 48) & 0xFF;
  1283. if (ratio)
  1284. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 15 active cores\n",
  1285. ratio, bclk, ratio * bclk);
  1286. ratio = (msr >> 40) & 0xFF;
  1287. if (ratio)
  1288. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 14 active cores\n",
  1289. ratio, bclk, ratio * bclk);
  1290. ratio = (msr >> 32) & 0xFF;
  1291. if (ratio)
  1292. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 13 active cores\n",
  1293. ratio, bclk, ratio * bclk);
  1294. ratio = (msr >> 24) & 0xFF;
  1295. if (ratio)
  1296. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 12 active cores\n",
  1297. ratio, bclk, ratio * bclk);
  1298. ratio = (msr >> 16) & 0xFF;
  1299. if (ratio)
  1300. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 11 active cores\n",
  1301. ratio, bclk, ratio * bclk);
  1302. ratio = (msr >> 8) & 0xFF;
  1303. if (ratio)
  1304. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 10 active cores\n",
  1305. ratio, bclk, ratio * bclk);
  1306. ratio = (msr >> 0) & 0xFF;
  1307. if (ratio)
  1308. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 9 active cores\n",
  1309. ratio, bclk, ratio * bclk);
  1310. return;
  1311. }
  1312. static void
  1313. dump_nhm_turbo_ratio_limits(void)
  1314. {
  1315. unsigned long long msr;
  1316. unsigned int ratio;
  1317. get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
  1318. fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);
  1319. ratio = (msr >> 56) & 0xFF;
  1320. if (ratio)
  1321. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 8 active cores\n",
  1322. ratio, bclk, ratio * bclk);
  1323. ratio = (msr >> 48) & 0xFF;
  1324. if (ratio)
  1325. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 7 active cores\n",
  1326. ratio, bclk, ratio * bclk);
  1327. ratio = (msr >> 40) & 0xFF;
  1328. if (ratio)
  1329. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 6 active cores\n",
  1330. ratio, bclk, ratio * bclk);
  1331. ratio = (msr >> 32) & 0xFF;
  1332. if (ratio)
  1333. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 5 active cores\n",
  1334. ratio, bclk, ratio * bclk);
  1335. ratio = (msr >> 24) & 0xFF;
  1336. if (ratio)
  1337. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
  1338. ratio, bclk, ratio * bclk);
  1339. ratio = (msr >> 16) & 0xFF;
  1340. if (ratio)
  1341. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
  1342. ratio, bclk, ratio * bclk);
  1343. ratio = (msr >> 8) & 0xFF;
  1344. if (ratio)
  1345. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
  1346. ratio, bclk, ratio * bclk);
  1347. ratio = (msr >> 0) & 0xFF;
  1348. if (ratio)
  1349. fprintf(outf, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
  1350. ratio, bclk, ratio * bclk);
  1351. return;
  1352. }
  1353. static void
  1354. dump_knl_turbo_ratio_limits(void)
  1355. {
  1356. const unsigned int buckets_no = 7;
  1357. unsigned long long msr;
  1358. int delta_cores, delta_ratio;
  1359. int i, b_nr;
  1360. unsigned int cores[buckets_no];
  1361. unsigned int ratio[buckets_no];
  1362. get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
  1363. fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n",
  1364. base_cpu, msr);
  1365. /**
  1366. * Turbo encoding in KNL is as follows:
  1367. * [0] -- Reserved
  1368. * [7:1] -- Base value of number of active cores of bucket 1.
  1369. * [15:8] -- Base value of freq ratio of bucket 1.
  1370. * [20:16] -- +ve delta of number of active cores of bucket 2.
  1371. * i.e. active cores of bucket 2 =
  1372. * active cores of bucket 1 + delta
  1373. * [23:21] -- Negative delta of freq ratio of bucket 2.
  1374. * i.e. freq ratio of bucket 2 =
  1375. * freq ratio of bucket 1 - delta
  1376. * [28:24]-- +ve delta of number of active cores of bucket 3.
  1377. * [31:29]-- -ve delta of freq ratio of bucket 3.
  1378. * [36:32]-- +ve delta of number of active cores of bucket 4.
  1379. * [39:37]-- -ve delta of freq ratio of bucket 4.
  1380. * [44:40]-- +ve delta of number of active cores of bucket 5.
  1381. * [47:45]-- -ve delta of freq ratio of bucket 5.
  1382. * [52:48]-- +ve delta of number of active cores of bucket 6.
  1383. * [55:53]-- -ve delta of freq ratio of bucket 6.
  1384. * [60:56]-- +ve delta of number of active cores of bucket 7.
  1385. * [63:61]-- -ve delta of freq ratio of bucket 7.
  1386. */
  1387. b_nr = 0;
  1388. cores[b_nr] = (msr & 0xFF) >> 1;
  1389. ratio[b_nr] = (msr >> 8) & 0xFF;
  1390. for (i = 16; i < 64; i += 8) {
  1391. delta_cores = (msr >> i) & 0x1F;
  1392. delta_ratio = (msr >> (i + 5)) & 0x7;
  1393. cores[b_nr + 1] = cores[b_nr] + delta_cores;
  1394. ratio[b_nr + 1] = ratio[b_nr] - delta_ratio;
  1395. b_nr++;
  1396. }
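/* report buckets from most active cores down, skipping any bucket whose ratio repeats the next smaller one */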
  1397. for (i = buckets_no - 1; i >= 0; i--)
  1398. if (i > 0 ? ratio[i] != ratio[i - 1] : 1)
  1399. fprintf(outf,
  1400. "%d * %.0f = %.0f MHz max turbo %d active cores\n",
  1401. ratio[i], bclk, ratio[i] * bclk, cores[i]);
  1402. }
  1403. static void
  1404. dump_nhm_cst_cfg(void)
  1405. {
  1406. unsigned long long msr;
  1407. get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
  1408. #define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
  1409. #define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
  1410. fprintf(outf, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr);
  1411. fprintf(outf, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
  1412. (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
  1413. (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
  1414. (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
  1415. (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
  1416. (msr & (1 << 15)) ? "" : "UN",
  1417. (unsigned int)msr & 0xF,
  1418. pkg_cstate_limit_strings[pkg_cstate_limit]);
  1419. return;
  1420. }
  1421. static void
  1422. dump_config_tdp(void)
  1423. {
  1424. unsigned long long msr;
  1425. get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr);
  1426. fprintf(outf, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr);
  1427. fprintf(outf, " (base_ratio=%d)\n", (unsigned int)msr & 0xFF);
  1428. get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr);
  1429. fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr);
  1430. if (msr) {
  1431. fprintf(outf, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
  1432. fprintf(outf, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
  1433. fprintf(outf, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
  1434. fprintf(outf, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0x7FFF);
  1435. }
  1436. fprintf(outf, ")\n");
  1437. get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr);
  1438. fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr);
  1439. if (msr) {
  1440. fprintf(outf, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
  1441. fprintf(outf, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
  1442. fprintf(outf, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
  1443. fprintf(outf, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0x7FFF);
  1444. }
  1445. fprintf(outf, ")\n");
  1446. get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr);
  1447. fprintf(outf, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr);
  1448. if ((msr) & 0x3)
  1449. fprintf(outf, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3);
  1450. fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
  1451. fprintf(outf, ")\n");
  1452. get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr);
  1453. fprintf(outf, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr);
  1454. fprintf(outf, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xFF);
  1455. fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
  1456. fprintf(outf, ")\n");
  1457. }
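/* IRTL time units, in ns (each step is 32x the previous); index comes from the MSR's time-unit field */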
  1458. unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
  1459. void print_irtl(void)
  1460. {
  1461. unsigned long long msr;
  1462. get_msr(base_cpu, MSR_PKGC3_IRTL, &msr);
  1463. fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr);
  1464. fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
  1465. (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
  1466. get_msr(base_cpu, MSR_PKGC6_IRTL, &msr);
  1467. fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr);
  1468. fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
  1469. (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
  1470. get_msr(base_cpu, MSR_PKGC7_IRTL, &msr);
  1471. fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr);
  1472. fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
  1473. (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
  1474. if (!do_irtl_hsw)
  1475. return;
  1476. get_msr(base_cpu, MSR_PKGC8_IRTL, &msr);
  1477. fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr);
  1478. fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
  1479. (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
  1480. get_msr(base_cpu, MSR_PKGC9_IRTL, &msr);
  1481. fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr);
  1482. fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
  1483. (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
  1484. get_msr(base_cpu, MSR_PKGC10_IRTL, &msr);
  1485. fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr);
  1486. fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
  1487. (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
  1488. }
  1489. void free_fd_percpu(void)
  1490. {
  1491. int i;
  1492. for (i = 0; i < topo.max_cpu_num + 1; ++i) {
  1493. if (fd_percpu[i] != 0)
  1494. close(fd_percpu[i]);
  1495. }
  1496. free(fd_percpu);
  1497. }
  1498. void free_all_buffers(void)
  1499. {
  1500. CPU_FREE(cpu_present_set);
  1501. cpu_present_set = NULL;
  1502. cpu_present_setsize = 0;
  1503. CPU_FREE(cpu_affinity_set);
  1504. cpu_affinity_set = NULL;
  1505. cpu_affinity_setsize = 0;
  1506. free(thread_even);
  1507. free(core_even);
  1508. free(package_even);
  1509. thread_even = NULL;
  1510. core_even = NULL;
  1511. package_even = NULL;
  1512. free(thread_odd);
  1513. free(core_odd);
  1514. free(package_odd);
  1515. thread_odd = NULL;
  1516. core_odd = NULL;
  1517. package_odd = NULL;
  1518. free(output_buffer);
  1519. output_buffer = NULL;
  1520. outp = NULL;
  1521. free_fd_percpu();
  1522. free(irq_column_2_cpu);
  1523. free(irqs_per_cpu);
  1524. }
  1525. /*
  1526. * Open a file, and exit on failure
  1527. */
  1528. FILE *fopen_or_die(const char *path, const char *mode)
  1529. {
  1530. FILE *filep = fopen(path, mode);
  1531. if (!filep)
  1532. err(1, "%s: open failed", path);
  1533. return filep;
  1534. }
  1535. /*
  1536. * Parse a file containing a single int.
  1537. */
  1538. int parse_int_file(const char *fmt, ...)
  1539. {
  1540. va_list args;
  1541. char path[PATH_MAX];
  1542. FILE *filep;
  1543. int value;
  1544. va_start(args, fmt);
  1545. vsnprintf(path, sizeof(path), fmt, args);
  1546. va_end(args);
  1547. filep = fopen_or_die(path, "r");
  1548. if (fscanf(filep, "%d", &value) != 1)
  1549. err(1, "%s: failed to parse number from file", path);
  1550. fclose(filep);
  1551. return value;
  1552. }
  1553. /*
  1554. * get_cpu_position_in_core(cpu)
  1555. * return the position of the CPU among its HT siblings in the core
  1556. * return -1 if the sibling is not in list
  1557. */
  1558. int get_cpu_position_in_core(int cpu)
  1559. {
  1560. char path[64];
  1561. FILE *filep;
  1562. int this_cpu;
  1563. char character;
  1564. int i;
  1565. sprintf(path,
  1566. "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
  1567. cpu);
  1568. filep = fopen(path, "r");
  1569. if (filep == NULL) {
  1570. perror(path);
  1571. exit(1);
  1572. }
  1573. for (i = 0; i < topo.num_threads_per_core; i++) {
  1574. fscanf(filep, "%d", &this_cpu);
  1575. if (this_cpu == cpu) {
  1576. fclose(filep);
  1577. return i;
  1578. }
1579. /* Account for no separator after last thread */
  1580. if (i != (topo.num_threads_per_core - 1))
  1581. fscanf(filep, "%c", &character);
  1582. }
  1583. fclose(filep);
  1584. return -1;
  1585. }
  1586. /*
  1587. * cpu_is_first_core_in_package(cpu)
  1588. * return 1 if given CPU is 1st core in package
  1589. */
  1590. int cpu_is_first_core_in_package(int cpu)
  1591. {
  1592. return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
  1593. }
  1594. int get_physical_package_id(int cpu)
  1595. {
  1596. return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
  1597. }
  1598. int get_core_id(int cpu)
  1599. {
  1600. return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
  1601. }
  1602. int get_num_ht_siblings(int cpu)
  1603. {
  1604. char path[80];
  1605. FILE *filep;
  1606. int sib1;
  1607. int matches = 0;
  1608. char character;
  1609. char str[100];
  1610. char *ch;
  1611. sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
  1612. filep = fopen_or_die(path, "r");
  1613. /*
  1614. * file format:
  1615. * A ',' separated or '-' separated set of numbers
1616. * (e.g. 1-2 or 1,3,4,5)
  1617. */
  1618. fscanf(filep, "%d%c\n", &sib1, &character);
  1619. fseek(filep, 0, SEEK_SET);
  1620. fgets(str, 100, filep);
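/* count separator characters in the siblings list: sibling count = separators + 1 */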
  1621. ch = strchr(str, character);
  1622. while (ch != NULL) {
  1623. matches++;
  1624. ch = strchr(ch+1, character);
  1625. }
  1626. fclose(filep);
  1627. return matches+1;
  1628. }
  1629. /*
  1630. * run func(thread, core, package) in topology order
  1631. * skip non-present cpus
  1632. */
  1633. int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
  1634. struct pkg_data *, struct thread_data *, struct core_data *,
  1635. struct pkg_data *), struct thread_data *thread_base,
  1636. struct core_data *core_base, struct pkg_data *pkg_base,
  1637. struct thread_data *thread_base2, struct core_data *core_base2,
  1638. struct pkg_data *pkg_base2)
  1639. {
  1640. int retval, pkg_no, core_no, thread_no;
  1641. for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
  1642. for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
  1643. for (thread_no = 0; thread_no <
  1644. topo.num_threads_per_core; ++thread_no) {
  1645. struct thread_data *t, *t2;
  1646. struct core_data *c, *c2;
  1647. struct pkg_data *p, *p2;
  1648. t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
  1649. if (cpu_is_not_present(t->cpu_id))
  1650. continue;
  1651. t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
  1652. c = GET_CORE(core_base, core_no, pkg_no);
  1653. c2 = GET_CORE(core_base2, core_no, pkg_no);
  1654. p = GET_PKG(pkg_base, pkg_no);
  1655. p2 = GET_PKG(pkg_base2, pkg_no);
  1656. retval = func(t, c, p, t2, c2, p2);
  1657. if (retval)
  1658. return retval;
  1659. }
  1660. }
  1661. }
  1662. return 0;
  1663. }
  1664. /*
  1665. * run func(cpu) on every cpu in /proc/stat
  1666. * return max_cpu number
  1667. */
  1668. int for_all_proc_cpus(int (func)(int))
  1669. {
  1670. FILE *fp;
  1671. int cpu_num;
  1672. int retval;
  1673. fp = fopen_or_die(proc_stat, "r");
  1674. retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
  1675. if (retval != 0)
  1676. err(1, "%s: failed to parse format", proc_stat);
  1677. while (1) {
  1678. retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
  1679. if (retval != 1)
  1680. break;
  1681. retval = func(cpu_num);
  1682. if (retval) {
  1683. fclose(fp);
  1684. return(retval);
  1685. }
  1686. }
  1687. fclose(fp);
  1688. return 0;
  1689. }
  1690. void re_initialize(void)
  1691. {
  1692. free_all_buffers();
  1693. setup_all_buffers();
  1694. printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
  1695. }
  1696. /*
  1697. * count_cpus()
  1698. * remember the last one seen, it will be the max
  1699. */
  1700. int count_cpus(int cpu)
  1701. {
  1702. if (topo.max_cpu_num < cpu)
  1703. topo.max_cpu_num = cpu;
  1704. topo.num_cpus += 1;
  1705. return 0;
  1706. }
  1707. int mark_cpu_present(int cpu)
  1708. {
  1709. CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
  1710. return 0;
  1711. }
  1712. /*
  1713. * snapshot_proc_interrupts()
  1714. *
  1715. * read and record summary of /proc/interrupts
  1716. *
  1717. * return 1 if config change requires a restart, else return 0
  1718. */
  1719. int snapshot_proc_interrupts(void)
  1720. {
  1721. static FILE *fp;
  1722. int column, retval;
  1723. if (fp == NULL)
  1724. fp = fopen_or_die("/proc/interrupts", "r");
  1725. else
  1726. rewind(fp);
  1727. /* read 1st line of /proc/interrupts to get cpu* name for each column */
  1728. for (column = 0; column < topo.num_cpus; ++column) {
  1729. int cpu_number;
  1730. retval = fscanf(fp, " CPU%d", &cpu_number);
  1731. if (retval != 1)
  1732. break;
  1733. if (cpu_number > topo.max_cpu_num) {
  1734. warn("/proc/interrupts: cpu%d: > %d", cpu_number, topo.max_cpu_num);
  1735. return 1;
  1736. }
  1737. irq_column_2_cpu[column] = cpu_number;
  1738. irqs_per_cpu[cpu_number] = 0;
  1739. }
  1740. /* read /proc/interrupt count lines and sum up irqs per cpu */
  1741. while (1) {
  1742. int column;
  1743. char buf[64];
1744. retval = fscanf(fp, " %63s:", buf); /* flush irq# "N:" */
  1745. if (retval != 1)
  1746. break;
  1747. /* read the count per cpu */
  1748. for (column = 0; column < topo.num_cpus; ++column) {
  1749. int cpu_number, irq_count;
  1750. retval = fscanf(fp, " %d", &irq_count);
  1751. if (retval != 1)
  1752. break;
  1753. cpu_number = irq_column_2_cpu[column];
  1754. irqs_per_cpu[cpu_number] += irq_count;
  1755. }
  1756. while (getc(fp) != '\n')
  1757. ; /* flush interrupt description */
  1758. }
  1759. return 0;
  1760. }
  1761. /*
  1762. * snapshot_gfx_rc6_ms()
  1763. *
  1764. * record snapshot of
  1765. * /sys/class/drm/card0/power/rc6_residency_ms
  1766. *
  1767. * return 1 if config change requires a restart, else return 0
  1768. */
  1769. int snapshot_gfx_rc6_ms(void)
  1770. {
  1771. FILE *fp;
  1772. int retval;
  1773. fp = fopen_or_die("/sys/class/drm/card0/power/rc6_residency_ms", "r");
  1774. retval = fscanf(fp, "%lld", &gfx_cur_rc6_ms);
  1775. if (retval != 1)
  1776. err(1, "GFX rc6");
  1777. fclose(fp);
  1778. return 0;
  1779. }
  1780. /*
  1781. * snapshot_gfx_mhz()
  1782. *
  1783. * record snapshot of
  1784. * /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz
  1785. *
  1786. * return 1 if config change requires a restart, else return 0
  1787. */
  1788. int snapshot_gfx_mhz(void)
  1789. {
  1790. static FILE *fp;
  1791. int retval;
  1792. if (fp == NULL)
  1793. fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
  1794. else
  1795. rewind(fp);
  1796. retval = fscanf(fp, "%d", &gfx_cur_mhz);
  1797. if (retval != 1)
  1798. err(1, "GFX MHz");
  1799. return 0;
  1800. }
  1801. /*
  1802. * snapshot /proc and /sys files
  1803. *
  1804. * return 1 if configuration restart needed, else return 0
  1805. */
  1806. int snapshot_proc_sysfs_files(void)
  1807. {
  1808. if (snapshot_proc_interrupts())
  1809. return 1;
  1810. if (do_gfx_rc6_ms)
  1811. snapshot_gfx_rc6_ms();
  1812. if (do_gfx_mhz)
  1813. snapshot_gfx_mhz();
  1814. return 0;
  1815. }
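/* alternate EVEN and ODD counter sets so each interval is the delta of the two most recent snapshots */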
  1816. void turbostat_loop()
  1817. {
  1818. int retval;
  1819. int restarted = 0;
  1820. restart:
  1821. restarted++;
  1822. snapshot_proc_sysfs_files();
  1823. retval = for_all_cpus(get_counters, EVEN_COUNTERS);
  1824. if (retval < -1) {
  1825. exit(retval);
  1826. } else if (retval == -1) {
  1827. if (restarted > 1) {
  1828. exit(retval);
  1829. }
  1830. re_initialize();
  1831. goto restart;
  1832. }
  1833. restarted = 0;
  1834. gettimeofday(&tv_even, (struct timezone *)NULL);
  1835. while (1) {
  1836. if (for_all_proc_cpus(cpu_is_not_present)) {
  1837. re_initialize();
  1838. goto restart;
  1839. }
  1840. nanosleep(&interval_ts, NULL);
  1841. if (snapshot_proc_sysfs_files())
  1842. goto restart;
  1843. retval = for_all_cpus(get_counters, ODD_COUNTERS);
  1844. if (retval < -1) {
  1845. exit(retval);
  1846. } else if (retval == -1) {
  1847. re_initialize();
  1848. goto restart;
  1849. }
  1850. gettimeofday(&tv_odd, (struct timezone *)NULL);
  1851. timersub(&tv_odd, &tv_even, &tv_delta);
  1852. if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) {
  1853. re_initialize();
  1854. goto restart;
  1855. }
  1856. compute_average(EVEN_COUNTERS);
  1857. format_all_counters(EVEN_COUNTERS);
  1858. flush_output_stdout();
  1859. nanosleep(&interval_ts, NULL);
  1860. if (snapshot_proc_sysfs_files())
  1861. goto restart;
  1862. retval = for_all_cpus(get_counters, EVEN_COUNTERS);
  1863. if (retval < -1) {
  1864. exit(retval);
  1865. } else if (retval == -1) {
  1866. re_initialize();
  1867. goto restart;
  1868. }
  1869. gettimeofday(&tv_even, (struct timezone *)NULL);
  1870. timersub(&tv_even, &tv_odd, &tv_delta);
  1871. if (for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS)) {
  1872. re_initialize();
  1873. goto restart;
  1874. }
  1875. compute_average(ODD_COUNTERS);
  1876. format_all_counters(ODD_COUNTERS);
  1877. flush_output_stdout();
  1878. }
  1879. }
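/* probe for the msr driver; try loading it via modprobe if the device node is missing */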
  1880. void check_dev_msr()
  1881. {
  1882. struct stat sb;
  1883. char pathname[32];
  1884. sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
  1885. if (stat(pathname, &sb))
  1886. if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1887. err(-5, "no /dev/cpu/0/msr, try \"# modprobe msr\"");
  1888. }
  1889. void check_permissions()
  1890. {
  1891. struct __user_cap_header_struct cap_header_data;
  1892. cap_user_header_t cap_header = &cap_header_data;
  1893. struct __user_cap_data_struct cap_data_data;
  1894. cap_user_data_t cap_data = &cap_data_data;
  1895. extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
  1896. int do_exit = 0;
  1897. char pathname[32];
  1898. /* check for CAP_SYS_RAWIO */
  1899. cap_header->pid = getpid();
  1900. cap_header->version = _LINUX_CAPABILITY_VERSION;
  1901. if (capget(cap_header, cap_data) < 0)
  1902. err(-6, "capget(2) failed");
  1903. if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
  1904. do_exit++;
  1905. warnx("capget(CAP_SYS_RAWIO) failed,"
  1906. " try \"# setcap cap_sys_rawio=ep %s\"", progname);
  1907. }
  1908. /* test file permissions */
  1909. sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
  1910. if (euidaccess(pathname, R_OK)) {
  1911. do_exit++;
  1912. warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
  1913. }
1914. /* if all else fails, tell them to be root */
  1915. if (do_exit)
  1916. if (getuid() != 0)
  1917. warnx("... or simply run as root");
  1918. if (do_exit)
  1919. exit(-6);
  1920. }
  1921. /*
  1922. * NHM adds support for additional MSRs:
  1923. *
  1924. * MSR_SMI_COUNT 0x00000034
  1925. *
  1926. * MSR_PLATFORM_INFO 0x000000ce
  1927. * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
  1928. *
  1929. * MSR_PKG_C3_RESIDENCY 0x000003f8
  1930. * MSR_PKG_C6_RESIDENCY 0x000003f9
  1931. * MSR_CORE_C3_RESIDENCY 0x000003fc
  1932. * MSR_CORE_C6_RESIDENCY 0x000003fd
  1933. *
  1934. * Side effect:
  1935. * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL
  1936. */
  1937. int probe_nhm_msrs(unsigned int family, unsigned int model)
  1938. {
  1939. unsigned long long msr;
  1940. unsigned int base_ratio;
  1941. int *pkg_cstate_limits;
  1942. if (!genuine_intel)
  1943. return 0;
  1944. if (family != 6)
  1945. return 0;
  1946. bclk = discover_bclk(family, model);
  1947. switch (model) {
1948. case INTEL_FAM6_NEHALEM_EP: /* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
  1949. case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
  1950. case 0x1F: /* Core i7 and i5 Processor - Nehalem */
  1951. case INTEL_FAM6_WESTMERE: /* Westmere Client - Clarkdale, Arrandale */
  1952. case INTEL_FAM6_WESTMERE_EP: /* Westmere EP - Gulftown */
  1953. case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */
  1954. case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */
  1955. pkg_cstate_limits = nhm_pkg_cstate_limits;
  1956. break;
  1957. case INTEL_FAM6_SANDYBRIDGE: /* SNB */
  1958. case INTEL_FAM6_SANDYBRIDGE_X: /* SNB Xeon */
  1959. case INTEL_FAM6_IVYBRIDGE: /* IVB */
  1960. case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
  1961. pkg_cstate_limits = snb_pkg_cstate_limits;
  1962. break;
  1963. case INTEL_FAM6_HASWELL_CORE: /* HSW */
  1964. case INTEL_FAM6_HASWELL_X: /* HSX */
  1965. case INTEL_FAM6_HASWELL_ULT: /* HSW */
  1966. case INTEL_FAM6_HASWELL_GT3E: /* HSW */
  1967. case INTEL_FAM6_BROADWELL_CORE: /* BDW */
  1968. case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
  1969. case INTEL_FAM6_BROADWELL_X: /* BDX */
  1970. case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
  1971. case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
  1972. case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
  1973. case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
  1974. case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
  1975. pkg_cstate_limits = hsw_pkg_cstate_limits;
  1976. break;
  1977. case INTEL_FAM6_SKYLAKE_X: /* SKX */
  1978. pkg_cstate_limits = skx_pkg_cstate_limits;
  1979. break;
  1980. case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
  1981. case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
  1982. pkg_cstate_limits = slv_pkg_cstate_limits;
  1983. break;
  1984. case INTEL_FAM6_ATOM_AIRMONT: /* AMT */
  1985. pkg_cstate_limits = amt_pkg_cstate_limits;
  1986. break;
  1987. case INTEL_FAM6_XEON_PHI_KNL: /* PHI */
  1988. case INTEL_FAM6_XEON_PHI_KNM:
  1989. pkg_cstate_limits = phi_pkg_cstate_limits;
  1990. break;
  1991. case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
  1992. case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
  1993. pkg_cstate_limits = bxt_pkg_cstate_limits;
  1994. break;
  1995. default:
  1996. return 0;
  1997. }
  1998. get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
  1999. pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
  2000. get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
  2001. base_ratio = (msr >> 8) & 0xFF;
  2002. base_hz = base_ratio * bclk * 1000000;
  2003. has_base_hz = 1;
  2004. return 1;
  2005. }
  2006. int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model)
  2007. {
  2008. switch (model) {
  2009. /* Nehalem compatible, but do not include turbo-ratio limit support */
  2010. case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */
  2011. case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */
  2012. case INTEL_FAM6_XEON_PHI_KNL: /* PHI - Knights Landing (different MSR definition) */
  2013. case INTEL_FAM6_XEON_PHI_KNM:
  2014. return 0;
  2015. default:
  2016. return 1;
  2017. }
  2018. }
  2019. int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
  2020. {
  2021. if (!genuine_intel)
  2022. return 0;
  2023. if (family != 6)
  2024. return 0;
  2025. switch (model) {
  2026. case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
  2027. case INTEL_FAM6_HASWELL_X: /* HSW Xeon */
  2028. return 1;
  2029. default:
  2030. return 0;
  2031. }
  2032. }
  2033. int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
  2034. {
  2035. if (!genuine_intel)
  2036. return 0;
  2037. if (family != 6)
  2038. return 0;
  2039. switch (model) {
  2040. case INTEL_FAM6_HASWELL_X: /* HSW Xeon */
  2041. return 1;
  2042. default:
  2043. return 0;
  2044. }
  2045. }
  2046. int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
  2047. {
  2048. if (!genuine_intel)
  2049. return 0;
  2050. if (family != 6)
  2051. return 0;
  2052. switch (model) {
  2053. case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */
  2054. case INTEL_FAM6_XEON_PHI_KNM:
  2055. return 1;
  2056. default:
  2057. return 0;
  2058. }
  2059. }
  2060. int has_config_tdp(unsigned int family, unsigned int model)
  2061. {
  2062. if (!genuine_intel)
  2063. return 0;
  2064. if (family != 6)
  2065. return 0;
  2066. switch (model) {
  2067. case INTEL_FAM6_IVYBRIDGE: /* IVB */
  2068. case INTEL_FAM6_HASWELL_CORE: /* HSW */
  2069. case INTEL_FAM6_HASWELL_X: /* HSX */
  2070. case INTEL_FAM6_HASWELL_ULT: /* HSW */
  2071. case INTEL_FAM6_HASWELL_GT3E: /* HSW */
  2072. case INTEL_FAM6_BROADWELL_CORE: /* BDW */
  2073. case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
  2074. case INTEL_FAM6_BROADWELL_X: /* BDX */
  2075. case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
  2076. case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
  2077. case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
  2078. case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
  2079. case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
  2080. case INTEL_FAM6_SKYLAKE_X: /* SKX */
  2081. case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */
  2082. case INTEL_FAM6_XEON_PHI_KNM:
  2083. return 1;
  2084. default:
  2085. return 0;
  2086. }
  2087. }
  2088. static void
  2089. dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
  2090. {
  2091. if (!do_nhm_platform_info)
  2092. return;
  2093. dump_nhm_platform_info();
  2094. if (has_hsw_turbo_ratio_limit(family, model))
  2095. dump_hsw_turbo_ratio_limits();
  2096. if (has_ivt_turbo_ratio_limit(family, model))
  2097. dump_ivt_turbo_ratio_limits();
  2098. if (has_nhm_turbo_ratio_limit(family, model))
  2099. dump_nhm_turbo_ratio_limits();
  2100. if (has_knl_turbo_ratio_limit(family, model))
  2101. dump_knl_turbo_ratio_limits();
  2102. if (has_config_tdp(family, model))
  2103. dump_config_tdp();
  2104. dump_nhm_cst_cfg();
  2105. }
  2106. /*
  2107. * print_epb()
  2108. * Decode the ENERGY_PERF_BIAS MSR
  2109. */
  2110. int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  2111. {
  2112. unsigned long long msr;
  2113. char *epb_string;
  2114. int cpu;
  2115. if (!has_epb)
  2116. return 0;
  2117. cpu = t->cpu_id;
  2118. /* EPB is per-package */
  2119. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  2120. return 0;
  2121. if (cpu_migrate(cpu)) {
  2122. fprintf(outf, "Could not migrate to CPU %d\n", cpu);
  2123. return -1;
  2124. }
  2125. if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
  2126. return 0;
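/* the low 4 bits of IA32_ENERGY_PERF_BIAS select the policy hint decoded below */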
  2127. switch (msr & 0xF) {
  2128. case ENERGY_PERF_BIAS_PERFORMANCE:
  2129. epb_string = "performance";
  2130. break;
  2131. case ENERGY_PERF_BIAS_NORMAL:
  2132. epb_string = "balanced";
  2133. break;
  2134. case ENERGY_PERF_BIAS_POWERSAVE:
  2135. epb_string = "powersave";
  2136. break;
  2137. default:
  2138. epb_string = "custom";
  2139. break;
  2140. }
  2141. fprintf(outf, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
  2142. return 0;
  2143. }
  2144. /*
  2145. * print_hwp()
  2146. * Decode the MSR_HWP_CAPABILITIES
  2147. */
  2148. int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  2149. {
  2150. unsigned long long msr;
  2151. int cpu;
  2152. if (!has_hwp)
  2153. return 0;
  2154. cpu = t->cpu_id;
  2155. /* MSR_HWP_CAPABILITIES is per-package */
  2156. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  2157. return 0;
  2158. if (cpu_migrate(cpu)) {
  2159. fprintf(outf, "Could not migrate to CPU %d\n", cpu);
  2160. return -1;
  2161. }
  2162. if (get_msr(cpu, MSR_PM_ENABLE, &msr))
  2163. return 0;
  2164. fprintf(outf, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n",
  2165. cpu, msr, (msr & (1 << 0)) ? "" : "No-");
2166. /* MSR_PM_ENABLE[0] == 1 if HWP is enabled and MSRs visible */
  2167. if ((msr & (1 << 0)) == 0)
  2168. return 0;
  2169. if (get_msr(cpu, MSR_HWP_CAPABILITIES, &msr))
  2170. return 0;
  2171. fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
  2172. "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n",
  2173. cpu, msr,
  2174. (unsigned int)HWP_HIGHEST_PERF(msr),
  2175. (unsigned int)HWP_GUARANTEED_PERF(msr),
  2176. (unsigned int)HWP_MOSTEFFICIENT_PERF(msr),
  2177. (unsigned int)HWP_LOWEST_PERF(msr));
  2178. if (get_msr(cpu, MSR_HWP_REQUEST, &msr))
  2179. return 0;
  2180. fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
  2181. "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n",
  2182. cpu, msr,
  2183. (unsigned int)(((msr) >> 0) & 0xff),
  2184. (unsigned int)(((msr) >> 8) & 0xff),
  2185. (unsigned int)(((msr) >> 16) & 0xff),
  2186. (unsigned int)(((msr) >> 24) & 0xff),
2187. (unsigned int)(((msr) >> 32) & 0x3ff),
  2188. (unsigned int)(((msr) >> 42) & 0x1));
  2189. if (has_hwp_pkg) {
  2190. if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr))
  2191. return 0;
  2192. fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
  2193. "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n",
  2194. cpu, msr,
  2195. (unsigned int)(((msr) >> 0) & 0xff),
  2196. (unsigned int)(((msr) >> 8) & 0xff),
  2197. (unsigned int)(((msr) >> 16) & 0xff),
  2198. (unsigned int)(((msr) >> 24) & 0xff),
2199. (unsigned int)(((msr) >> 32) & 0x3ff));
  2200. }
  2201. if (has_hwp_notify) {
  2202. if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr))
  2203. return 0;
  2204. fprintf(outf, "cpu%d: MSR_HWP_INTERRUPT: 0x%08llx "
  2205. "(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n",
  2206. cpu, msr,
  2207. ((msr) & 0x1) ? "EN" : "Dis",
  2208. ((msr) & 0x2) ? "EN" : "Dis");
  2209. }
  2210. if (get_msr(cpu, MSR_HWP_STATUS, &msr))
  2211. return 0;
  2212. fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx "
  2213. "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n",
  2214. cpu, msr,
  2215. ((msr) & 0x1) ? "" : "No-",
  2216. ((msr) & 0x2) ? "" : "No-");
  2217. return 0;
  2218. }
  2219. /*
  2220. * print_perf_limit()
  2221. */
  2222. int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  2223. {
  2224. unsigned long long msr;
  2225. int cpu;
  2226. cpu = t->cpu_id;
  2227. /* per-package */
  2228. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  2229. return 0;
  2230. if (cpu_migrate(cpu)) {
  2231. fprintf(outf, "Could not migrate to CPU %d\n", cpu);
  2232. return -1;
  2233. }
  2234. if (do_core_perf_limit_reasons) {
  2235. get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
  2236. fprintf(outf, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
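/* bits 15:0 report limits currently active; bits 31:16 report limits logged since last cleared */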
  2237. fprintf(outf, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
  2238. (msr & 1 << 15) ? "bit15, " : "",
  2239. (msr & 1 << 14) ? "bit14, " : "",
  2240. (msr & 1 << 13) ? "Transitions, " : "",
  2241. (msr & 1 << 12) ? "MultiCoreTurbo, " : "",
  2242. (msr & 1 << 11) ? "PkgPwrL2, " : "",
  2243. (msr & 1 << 10) ? "PkgPwrL1, " : "",
  2244. (msr & 1 << 9) ? "CorePwr, " : "",
  2245. (msr & 1 << 8) ? "Amps, " : "",
  2246. (msr & 1 << 6) ? "VR-Therm, " : "",
  2247. (msr & 1 << 5) ? "Auto-HWP, " : "",
  2248. (msr & 1 << 4) ? "Graphics, " : "",
  2249. (msr & 1 << 2) ? "bit2, " : "",
  2250. (msr & 1 << 1) ? "ThermStatus, " : "",
  2251. (msr & 1 << 0) ? "PROCHOT, " : "");
  2252. fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
  2253. (msr & 1 << 31) ? "bit31, " : "",
  2254. (msr & 1 << 30) ? "bit30, " : "",
  2255. (msr & 1 << 29) ? "Transitions, " : "",
  2256. (msr & 1 << 28) ? "MultiCoreTurbo, " : "",
  2257. (msr & 1 << 27) ? "PkgPwrL2, " : "",
  2258. (msr & 1 << 26) ? "PkgPwrL1, " : "",
  2259. (msr & 1 << 25) ? "CorePwr, " : "",
  2260. (msr & 1 << 24) ? "Amps, " : "",
  2261. (msr & 1 << 22) ? "VR-Therm, " : "",
  2262. (msr & 1 << 21) ? "Auto-HWP, " : "",
  2263. (msr & 1 << 20) ? "Graphics, " : "",
  2264. (msr & 1 << 18) ? "bit18, " : "",
  2265. (msr & 1 << 17) ? "ThermStatus, " : "",
  2266. (msr & 1 << 16) ? "PROCHOT, " : "");
  2267. }
  2268. if (do_gfx_perf_limit_reasons) {
  2269. get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
  2270. fprintf(outf, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
  2271. fprintf(outf, " (Active: %s%s%s%s%s%s%s%s)",
  2272. (msr & 1 << 0) ? "PROCHOT, " : "",
  2273. (msr & 1 << 1) ? "ThermStatus, " : "",
  2274. (msr & 1 << 4) ? "Graphics, " : "",
  2275. (msr & 1 << 6) ? "VR-Therm, " : "",
  2276. (msr & 1 << 8) ? "Amps, " : "",
  2277. (msr & 1 << 9) ? "GFXPwr, " : "",
  2278. (msr & 1 << 10) ? "PkgPwrL1, " : "",
  2279. (msr & 1 << 11) ? "PkgPwrL2, " : "");
  2280. fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s)\n",
  2281. (msr & 1 << 16) ? "PROCHOT, " : "",
  2282. (msr & 1 << 17) ? "ThermStatus, " : "",
  2283. (msr & 1 << 20) ? "Graphics, " : "",
  2284. (msr & 1 << 22) ? "VR-Therm, " : "",
  2285. (msr & 1 << 24) ? "Amps, " : "",
  2286. (msr & 1 << 25) ? "GFXPwr, " : "",
  2287. (msr & 1 << 26) ? "PkgPwrL1, " : "",
  2288. (msr & 1 << 27) ? "PkgPwrL2, " : "");
  2289. }
  2290. if (do_ring_perf_limit_reasons) {
  2291. get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
  2292. fprintf(outf, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
  2293. fprintf(outf, " (Active: %s%s%s%s%s%s)",
  2294. (msr & 1 << 0) ? "PROCHOT, " : "",
  2295. (msr & 1 << 1) ? "ThermStatus, " : "",
  2296. (msr & 1 << 6) ? "VR-Therm, " : "",
  2297. (msr & 1 << 8) ? "Amps, " : "",
  2298. (msr & 1 << 10) ? "PkgPwrL1, " : "",
  2299. (msr & 1 << 11) ? "PkgPwrL2, " : "");
  2300. fprintf(outf, " (Logged: %s%s%s%s%s%s)\n",
  2301. (msr & 1 << 16) ? "PROCHOT, " : "",
  2302. (msr & 1 << 17) ? "ThermStatus, " : "",
  2303. (msr & 1 << 22) ? "VR-Therm, " : "",
  2304. (msr & 1 << 24) ? "Amps, " : "",
  2305. (msr & 1 << 26) ? "PkgPwrL1, " : "",
  2306. (msr & 1 << 27) ? "PkgPwrL2, " : "");
  2307. }
  2308. return 0;
  2309. }
  2310. #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
  2311. #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
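/* get_tdp() returns package TDP in Watts; rapl_probe() uses it to estimate the Joule-counter wraparound time */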
  2312. double get_tdp(unsigned int model)
  2313. {
  2314. unsigned long long msr;
  2315. if (do_rapl & RAPL_PKG_POWER_INFO)
  2316. if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
  2317. return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
  2318. switch (model) {
  2319. case INTEL_FAM6_ATOM_SILVERMONT1:
  2320. case INTEL_FAM6_ATOM_SILVERMONT2:
  2321. return 30.0;
  2322. default:
  2323. return 135.0;
  2324. }
  2325. }
  2326. /*
  2327. * rapl_dram_energy_units_probe()
  2328. * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
  2329. */
  2330. static double
  2331. rapl_dram_energy_units_probe(int model, double rapl_energy_units)
  2332. {
  2333. /* only called for genuine_intel, family 6 */
  2334. switch (model) {
  2335. case INTEL_FAM6_HASWELL_X: /* HSX */
  2336. case INTEL_FAM6_BROADWELL_X: /* BDX */
  2337. case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
  2338. case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
  2339. case INTEL_FAM6_XEON_PHI_KNM:
  2340. return (rapl_dram_energy_units = 15.3 / 1000000);
  2341. default:
  2342. return (rapl_energy_units);
  2343. }
  2344. }
  2345. /*
  2346. * rapl_probe()
  2347. *
  2348. * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
  2349. */
  2350. void rapl_probe(unsigned int family, unsigned int model)
  2351. {
  2352. unsigned long long msr;
  2353. unsigned int time_unit;
  2354. double tdp;
  2355. if (!genuine_intel)
  2356. return;
  2357. if (family != 6)
  2358. return;
  2359. switch (model) {
  2360. case INTEL_FAM6_SANDYBRIDGE:
  2361. case INTEL_FAM6_IVYBRIDGE:
  2362. case INTEL_FAM6_HASWELL_CORE: /* HSW */
  2363. case INTEL_FAM6_HASWELL_ULT: /* HSW */
  2364. case INTEL_FAM6_HASWELL_GT3E: /* HSW */
  2365. case INTEL_FAM6_BROADWELL_CORE: /* BDW */
  2366. case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
  2367. do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
  2368. break;
  2369. case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
  2370. do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
  2371. break;
  2372. case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
  2373. case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
  2374. case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
  2375. case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
  2376. do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
  2377. break;
  2378. case INTEL_FAM6_HASWELL_X: /* HSX */
  2379. case INTEL_FAM6_BROADWELL_X: /* BDX */
  2380. case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
  2381. case INTEL_FAM6_SKYLAKE_X: /* SKX */
  2382. case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
  2383. case INTEL_FAM6_XEON_PHI_KNM:
  2384. do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
  2385. break;
  2386. case INTEL_FAM6_SANDYBRIDGE_X:
  2387. case INTEL_FAM6_IVYBRIDGE_X:
  2388. do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
  2389. break;
  2390. case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
  2391. case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
  2392. do_rapl = RAPL_PKG | RAPL_CORES;
  2393. break;
  2394. case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
  2395. do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS;
  2396. break;
  2397. default:
  2398. return;
  2399. }
  2400. /* units on package 0, verify later other packages match */
  2401. if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
  2402. return;
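/* MSR_RAPL_POWER_UNIT: power units in bits 3:0, energy units in bits 12:8, time units in bits 19:16; each encodes 1/2^N */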
  2403. rapl_power_units = 1.0 / (1 << (msr & 0xF));
  2404. if (model == INTEL_FAM6_ATOM_SILVERMONT1)
  2405. rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
  2406. else
  2407. rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
  2408. rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units);
  2409. time_unit = msr >> 16 & 0xF;
  2410. if (time_unit == 0)
  2411. time_unit = 0xA;
  2412. rapl_time_units = 1.0 / (1 << (time_unit));
  2413. tdp = get_tdp(model);
  2414. rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
  2415. if (debug)
  2416. fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
  2417. return;
  2418. }
  2419. void perf_limit_reasons_probe(unsigned int family, unsigned int model)
  2420. {
  2421. if (!genuine_intel)
  2422. return;
  2423. if (family != 6)
  2424. return;
  2425. switch (model) {
  2426. case INTEL_FAM6_HASWELL_CORE: /* HSW */
  2427. case INTEL_FAM6_HASWELL_ULT: /* HSW */
  2428. case INTEL_FAM6_HASWELL_GT3E: /* HSW */
  2429. do_gfx_perf_limit_reasons = 1;
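/* fall through: HSW client parts also have the core and ring perf-limit MSRs */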
  2430. case INTEL_FAM6_HASWELL_X: /* HSX */
  2431. do_core_perf_limit_reasons = 1;
  2432. do_ring_perf_limit_reasons = 1;
  2433. default:
  2434. return;
  2435. }
  2436. }
  2437. int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  2438. {
  2439. unsigned long long msr;
2440. unsigned int dts, dts2;
  2441. int cpu;
  2442. if (!(do_dts || do_ptm))
  2443. return 0;
  2444. cpu = t->cpu_id;
  2445. /* DTS is per-core, no need to print for each thread */
  2446. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
  2447. return 0;
  2448. if (cpu_migrate(cpu)) {
  2449. fprintf(outf, "Could not migrate to CPU %d\n", cpu);
  2450. return -1;
  2451. }
  2452. if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
  2453. if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
  2454. return 0;
  2455. dts = (msr >> 16) & 0x7F;
  2456. fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
  2457. cpu, msr, tcc_activation_temp - dts);
  2458. #ifdef THERM_DEBUG
  2459. if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
  2460. return 0;
  2461. dts = (msr >> 16) & 0x7F;
  2462. dts2 = (msr >> 8) & 0x7F;
  2463. fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
  2464. cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
  2465. #endif
  2466. }
  2467. if (do_dts) {
  2468. unsigned int resolution;
  2469. if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
  2470. return 0;
  2471. dts = (msr >> 16) & 0x7F;
  2472. resolution = (msr >> 27) & 0xF;
  2473. fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
  2474. cpu, msr, tcc_activation_temp - dts, resolution);
  2475. #ifdef THERM_DEBUG
  2476. if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
  2477. return 0;
  2478. dts = (msr >> 16) & 0x7F;
  2479. dts2 = (msr >> 8) & 0x7F;
  2480. fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
  2481. cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
  2482. #endif
  2483. }
  2484. return 0;
  2485. }
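/* RAPL time window = (1 + Z/4) * 2^Y time units, with Y in bits 21:17 and Z in bits 23:22 of the limit MSR */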
  2486. void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
  2487. {
  2488. fprintf(outf, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
  2489. cpu, label,
  2490. ((msr >> 15) & 1) ? "EN" : "DIS",
  2491. ((msr >> 0) & 0x7FFF) * rapl_power_units,
  2492. (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
  2493. (((msr >> 16) & 1) ? "EN" : "DIS"));
  2494. return;
  2495. }
  2496. int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  2497. {
  2498. unsigned long long msr;
  2499. int cpu;
  2500. if (!do_rapl)
  2501. return 0;
  2502. /* RAPL counters are per package, so print only for 1st thread/package */
  2503. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  2504. return 0;
  2505. cpu = t->cpu_id;
  2506. if (cpu_migrate(cpu)) {
  2507. fprintf(outf, "Could not migrate to CPU %d\n", cpu);
  2508. return -1;
  2509. }
  2510. if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
  2511. return -1;
  2512. if (debug) {
  2513. fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
  2514. "(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
  2515. rapl_power_units, rapl_energy_units, rapl_time_units);
  2516. }
  2517. if (do_rapl & RAPL_PKG_POWER_INFO) {
  2518. if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
  2519. return -5;
  2520. fprintf(outf, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
  2521. cpu, msr,
  2522. ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  2523. ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  2524. ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  2525. ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
  2526. }
  2527. if (do_rapl & RAPL_PKG) {
  2528. if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
  2529. return -9;
  2530. fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
  2531. cpu, msr, (msr >> 63) & 1 ? "": "UN");
  2532. print_power_limit_msr(cpu, msr, "PKG Limit #1");
  2533. fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
  2534. cpu,
  2535. ((msr >> 47) & 1) ? "EN" : "DIS",
  2536. ((msr >> 32) & 0x7FFF) * rapl_power_units,
  2537. (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
  2538. ((msr >> 48) & 1) ? "EN" : "DIS");
  2539. }
  2540. if (do_rapl & RAPL_DRAM_POWER_INFO) {
  2541. if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
  2542. return -6;
  2543. fprintf(outf, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
  2544. cpu, msr,
  2545. ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  2546. ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  2547. ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  2548. ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
  2549. }
  2550. if (do_rapl & RAPL_DRAM) {
  2551. if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
  2552. return -9;
  2553. fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
  2554. cpu, msr, (msr >> 31) & 1 ? "": "UN");
  2555. print_power_limit_msr(cpu, msr, "DRAM Limit");
  2556. }
  2557. if (do_rapl & RAPL_CORE_POLICY) {
  2558. if (debug) {
  2559. if (get_msr(cpu, MSR_PP0_POLICY, &msr))
  2560. return -7;
  2561. fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
  2562. }
  2563. }
  2564. if (do_rapl & RAPL_CORES_POWER_LIMIT) {
  2565. if (debug) {
  2566. if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
  2567. return -9;
  2568. fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
  2569. cpu, msr, (msr >> 31) & 1 ? "": "UN");
  2570. print_power_limit_msr(cpu, msr, "Cores Limit");
  2571. }
  2572. }
  2573. if (do_rapl & RAPL_GFX) {
  2574. if (debug) {
  2575. if (get_msr(cpu, MSR_PP1_POLICY, &msr))
  2576. return -8;
  2577. fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
  2578. if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
  2579. return -9;
  2580. fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
  2581. cpu, msr, (msr >> 31) & 1 ? "": "UN");
  2582. print_power_limit_msr(cpu, msr, "GFX Limit");
  2583. }
  2584. }
  2585. return 0;
  2586. }

/*
 * SNB adds support for additional MSRs:
 *
 * MSR_PKG_C7_RESIDENCY		0x000003fa
 * MSR_CORE_C7_RESIDENCY	0x000003fe
 * MSR_PKG_C2_RESIDENCY		0x0000060d
 */
int has_snb_msrs(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:
	case INTEL_FAM6_IVYBRIDGE:		/* IVB */
	case INTEL_FAM6_IVYBRIDGE_X:		/* IVB Xeon */
	case INTEL_FAM6_HASWELL_CORE:		/* HSW */
	case INTEL_FAM6_HASWELL_X:		/* HSW */
	case INTEL_FAM6_HASWELL_ULT:		/* HSW */
	case INTEL_FAM6_HASWELL_GT3E:		/* HSW */
	case INTEL_FAM6_BROADWELL_CORE:		/* BDW */
	case INTEL_FAM6_BROADWELL_GT3E:		/* BDW */
	case INTEL_FAM6_BROADWELL_X:		/* BDX */
	case INTEL_FAM6_BROADWELL_XEON_D:	/* BDX-DE */
	case INTEL_FAM6_SKYLAKE_MOBILE:		/* SKL */
	case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
	case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
	case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
	case INTEL_FAM6_SKYLAKE_X:		/* SKX */
	case INTEL_FAM6_ATOM_GOLDMONT:		/* BXT */
	case INTEL_FAM6_ATOM_DENVERTON:		/* DNV */
		return 1;
	}
	return 0;
}

/*
 * HSW adds support for additional MSRs:
 *
 * MSR_PKG_C8_RESIDENCY	0x00000630
 * MSR_PKG_C9_RESIDENCY	0x00000631
 * MSR_PKG_C10_RESIDENCY	0x00000632
 *
 * MSR_PKGC8_IRTL		0x00000633
 * MSR_PKGC9_IRTL		0x00000634
 * MSR_PKGC10_IRTL		0x00000635
 *
 */
int has_hsw_msrs(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case INTEL_FAM6_HASWELL_ULT:		/* HSW */
	case INTEL_FAM6_BROADWELL_CORE:		/* BDW */
	case INTEL_FAM6_SKYLAKE_MOBILE:		/* SKL */
	case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
	case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
	case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
	case INTEL_FAM6_ATOM_GOLDMONT:		/* BXT */
		return 1;
	}
	return 0;
}

/*
 * SKL adds support for additional MSRs:
 *
 * MSR_PKG_WEIGHTED_CORE_C0_RES	0x00000658
 * MSR_PKG_ANY_CORE_C0_RES		0x00000659
 * MSR_PKG_ANY_GFXE_C0_RES		0x0000065A
 * MSR_PKG_BOTH_CORE_GFXE_C0_RES	0x0000065B
 */
int has_skl_msrs(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case INTEL_FAM6_SKYLAKE_MOBILE:		/* SKL */
	case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
	case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
	case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
		return 1;
	}
	return 0;
}

int is_slm(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case INTEL_FAM6_ATOM_SILVERMONT1:	/* BYT */
	case INTEL_FAM6_ATOM_SILVERMONT2:	/* AVN */
		return 1;
	}
	return 0;
}

int is_knl(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case INTEL_FAM6_XEON_PHI_KNL:	/* KNL */
	case INTEL_FAM6_XEON_PHI_KNM:
		return 1;
	}
	return 0;
}

unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
{
	if (is_knl(family, model))
		return 1024;
	return 1;
}
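
/*
 * Silvermont parts do not run a fixed 100 MHz bus clock: the low nibble
 * of MSR_FSB_FREQ selects an entry in slm_freq_table[] below.
 */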
#define SLM_BCLK_FREQS 5
double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};

double slm_bclk(void)
{
	unsigned long long msr = 3;
	unsigned int i;
	double freq;

	if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
		fprintf(outf, "SLM BCLK: unknown\n");

	i = msr & 0xf;
	if (i >= SLM_BCLK_FREQS) {
		fprintf(outf, "SLM BCLK[%d] invalid\n", i);
		i = 3;
	}
	freq = slm_freq_table[i];

	fprintf(outf, "SLM BCLK: %.1f MHz\n", freq);

	return freq;
}

double discover_bclk(unsigned int family, unsigned int model)
{
	if (has_snb_msrs(family, model) || is_knl(family, model))
		return 100.00;
	else if (is_slm(family, model))
		return slm_bclk();
	else
		return 133.33;
}

/*
 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
 * the Thermal Control Circuit (TCC) activates.
 * This is usually equal to tjMax.
 *
 * Older processors do not have this MSR, so there we guess,
 * but also allow cmdline over-ride with -T.
 *
 * Several MSR temperature values are in units of degrees-C
 * below this value, including the Digital Thermal Sensor (DTS),
 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
 */
int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	unsigned int target_c_local;
	int cpu;

	/* tcc_activation_temp is used only for dts or ptm */
	if (!(do_dts || do_ptm))
		return 0;

	/* this is a per-package concept */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	cpu = t->cpu_id;
	if (cpu_migrate(cpu)) {
		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (tcc_activation_temp_override != 0) {
		tcc_activation_temp = tcc_activation_temp_override;
		fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
			cpu, tcc_activation_temp);
		return 0;
	}

	/* Temperature Target MSR is Nehalem and newer only */
	if (!do_nhm_platform_info)
		goto guess;

	if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
		goto guess;
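
	/* bits 23:16 hold the TCC activation temperature, in degrees Celsius */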
	target_c_local = (msr >> 16) & 0xFF;

	if (debug)
		fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
			cpu, msr, target_c_local);

	if (!target_c_local)
		goto guess;

	tcc_activation_temp = target_c_local;

	return 0;

guess:
	tcc_activation_temp = TJMAX_DEFAULT;
	fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
		cpu, tcc_activation_temp);

	return 0;
}
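
/* report the lock bit (bit 0) and the SGX enable bit (bit 18) of MSR_IA32_FEATURE_CONTROL */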
void decode_feature_control_msr(void)
{
	unsigned long long msr;

	if (!get_msr(base_cpu, MSR_IA32_FEATURE_CONTROL, &msr))
		fprintf(outf, "cpu%d: MSR_IA32_FEATURE_CONTROL: 0x%08llx (%sLocked %s)\n",
			base_cpu, msr,
			msr & FEATURE_CONTROL_LOCKED ? "" : "UN-",
			msr & (1 << 18) ? "SGX" : "");
}
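
/*
 * Report MSR_IA32_MISC_ENABLE bits of interest: bit 3 (automatic TCC),
 * bit 16 (Enhanced Intel SpeedStep / EIST), bit 18 (MONITOR/MWAIT enable).
 */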
void decode_misc_enable_msr(void)
{
	unsigned long long msr;

	if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr))
		fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%s %s %s)\n",
			base_cpu, msr,
			msr & (1 << 3) ? "TCC" : "",
			msr & (1 << 16) ? "EIST" : "",
			msr & (1 << 18) ? "MONITOR" : "");
}

/*
 * Decode MSR_MISC_PWR_MGMT
 *
 * Decode the bits according to the Nehalem documentation
 * bit[0] seems to continue to have same meaning going forward
 * bit[1] less so...
 */
void decode_misc_pwr_mgmt_msr(void)
{
	unsigned long long msr;

	if (!do_nhm_platform_info)
		return;

	if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr))
		fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB %sable-OOB)\n",
			base_cpu, msr,
			msr & (1 << 0) ? "DIS" : "EN",
			msr & (1 << 1) ? "EN" : "DIS",
			msr & (1 << 8) ? "EN" : "DIS");
}

void process_cpuid()
{
	unsigned int eax, ebx, ecx, edx, max_level, max_extended_level;
	unsigned int fms, family, model, stepping;

	eax = ebx = ecx = edx = 0;

	__cpuid(0, max_level, ebx, ecx, edx);
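
	/* CPUID(0): EBX:EDX:ECX hold the vendor string; these constants spell "GenuineIntel" */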
	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
		genuine_intel = 1;

	if (debug)
		fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
			(char *)&ebx, (char *)&edx, (char *)&ecx);

	__cpuid(1, fms, ebx, ecx, edx);
	family = (fms >> 8) & 0xf;
	model = (fms >> 4) & 0xf;
	stepping = fms & 0xf;
	if (family == 6 || family == 0xf)
		model += ((fms >> 16) & 0xf) << 4;

	if (debug) {
		fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
			max_level, family, model, stepping, family, model, stepping);
		fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n",
			ecx & (1 << 0) ? "SSE3" : "-",
			ecx & (1 << 3) ? "MONITOR" : "-",
			ecx & (1 << 6) ? "SMX" : "-",
			ecx & (1 << 7) ? "EIST" : "-",
			ecx & (1 << 8) ? "TM2" : "-",
			edx & (1 << 4) ? "TSC" : "-",
			edx & (1 << 5) ? "MSR" : "-",
			edx & (1 << 22) ? "ACPI-TM" : "-",
			edx & (1 << 29) ? "TM" : "-");
	}

	if (!(edx & (1 << 5)))
		errx(1, "CPUID: no MSR");

	/*
	 * check max extended function levels of CPUID.
	 * This is needed to check for invariant TSC.
	 * This check is valid for both Intel and AMD.
	 */
	ebx = ecx = edx = 0;
	__cpuid(0x80000000, max_extended_level, ebx, ecx, edx);

	if (max_extended_level >= 0x80000007) {

		/*
		 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
		 * this check is valid for both Intel and AMD
		 */
		__cpuid(0x80000007, eax, ebx, ecx, edx);
		has_invariant_tsc = edx & (1 << 8);
	}

	/*
	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
	 * this check is valid for both Intel and AMD
	 */
	__cpuid(0x6, eax, ebx, ecx, edx);
	has_aperf = ecx & (1 << 0);
	do_dts = eax & (1 << 0);
	do_ptm = eax & (1 << 6);
	has_hwp = eax & (1 << 7);
	has_hwp_notify = eax & (1 << 8);
	has_hwp_activity_window = eax & (1 << 9);
	has_hwp_epp = eax & (1 << 10);
	has_hwp_pkg = eax & (1 << 11);
	has_epb = ecx & (1 << 3);

	if (debug)
		fprintf(outf, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sHWP, "
			"%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n",
			has_aperf ? "" : "No-",
			do_dts ? "" : "No-",
			do_ptm ? "" : "No-",
			has_hwp ? "" : "No-",
			has_hwp_notify ? "" : "No-",
			has_hwp_activity_window ? "" : "No-",
			has_hwp_epp ? "" : "No-",
			has_hwp_pkg ? "" : "No-",
			has_epb ? "" : "No-");

	if (debug)
		decode_misc_enable_msr();

	if (max_level >= 0x7 && debug) {
		int has_sgx;

		ecx = 0;

		__cpuid_count(0x7, 0, eax, ebx, ecx, edx);

		has_sgx = ebx & (1 << 2);
		fprintf(outf, "CPUID(7): %sSGX\n", has_sgx ? "" : "No-");

		if (has_sgx)
			decode_feature_control_msr();
	}

	if (max_level >= 0x15) {
		unsigned int eax_crystal;
		unsigned int ebx_tsc;

		/*
		 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz
		 */
		eax_crystal = ebx_tsc = crystal_hz = edx = 0;
		__cpuid(0x15, eax_crystal, ebx_tsc, crystal_hz, edx);

		if (ebx_tsc != 0) {
			if (debug && (ebx_tsc != 0))
				fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
					eax_crystal, ebx_tsc, crystal_hz);
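
			/* ECX from CPUID.15H is 0 when the crystal frequency is not enumerated */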
			if (crystal_hz == 0)
				switch (model) {
				case INTEL_FAM6_SKYLAKE_MOBILE:		/* SKL */
				case INTEL_FAM6_SKYLAKE_DESKTOP:	/* SKL */
				case INTEL_FAM6_KABYLAKE_MOBILE:	/* KBL */
				case INTEL_FAM6_KABYLAKE_DESKTOP:	/* KBL */
					crystal_hz = 24000000;	/* 24.0 MHz */
					break;
				case INTEL_FAM6_SKYLAKE_X:		/* SKX */
				case INTEL_FAM6_ATOM_DENVERTON:		/* DNV */
					crystal_hz = 25000000;	/* 25.0 MHz */
					break;
				case INTEL_FAM6_ATOM_GOLDMONT:		/* BXT */
					crystal_hz = 19200000;	/* 19.2 MHz */
					break;
				default:
					crystal_hz = 0;
				}

			if (crystal_hz) {
				tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal;
				if (debug)
					fprintf(outf, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n",
						tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal);
			}
		}
	}
	if (max_level >= 0x16) {
		unsigned int base_mhz, max_mhz, bus_mhz, edx;

		/*
		 * CPUID 16H Base MHz, Max MHz, Bus MHz
		 */
		base_mhz = max_mhz = bus_mhz = edx = 0;

		__cpuid(0x16, base_mhz, max_mhz, bus_mhz, edx);
		if (debug)
			fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n",
				base_mhz, max_mhz, bus_mhz);
	}

	if (has_aperf)
		aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);

	do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
	do_snb_cstates = has_snb_msrs(family, model);
	do_irtl_snb = has_snb_msrs(family, model);
	do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
	do_pc3 = (pkg_cstate_limit >= PCL__3);
	do_pc6 = (pkg_cstate_limit >= PCL__6);
	do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
	do_c8_c9_c10 = has_hsw_msrs(family, model);
	do_irtl_hsw = has_hsw_msrs(family, model);
	do_skl_residency = has_skl_msrs(family, model);
	do_slm_cstates = is_slm(family, model);
	do_knl_cstates = is_knl(family, model);

	if (debug)
		decode_misc_pwr_mgmt_msr();

	rapl_probe(family, model);
	perf_limit_reasons_probe(family, model);

	if (debug)
		dump_cstate_pstate_config_info(family, model);

	if (has_skl_msrs(family, model))
		calculate_tsc_tweak();

	do_gfx_rc6_ms = !access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK);

	do_gfx_mhz = !access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK);

	return;
}

void help()
{
	fprintf(outf,
	"Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
	"\n"
	"Turbostat forks the specified COMMAND and prints statistics\n"
	"when COMMAND completes.\n"
	"If no COMMAND is specified, turbostat wakes every 5 seconds\n"
	"to print statistics, until interrupted.\n"
	"--add		add a counter\n"
	"		eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
	"--debug	run in \"debug\" mode\n"
	"--interval sec	Override default 5-second measurement interval\n"
	"--help		print this help message\n"
	"--out file	create or truncate \"file\" for all output\n"
	"--version	print version information\n"
	"\n"
	"For more help, run \"man turbostat\"\n");
}

/*
 * in /dev/cpu/ return success for names that are numbers
 * ie. filter out ".", "..", "microcode".
 */
int dir_filter(const struct dirent *dirp)
{
	if (isdigit(dirp->d_name[0]))
		return 1;
	else
		return 0;
}

int open_dev_cpu_msr(int dummy1)
{
	return 0;
}
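
/*
 * topology_probe() sizes the topology tables by walking every present CPU,
 * recording the largest core_id, physical_package_id, and sibling count
 * seen, then frees the scratch array.
 */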
void topology_probe()
{
	int i;
	int max_core_id = 0;
	int max_package_id = 0;
	int max_siblings = 0;
	struct cpu_topology {
		int core_id;
		int physical_package_id;
	} *cpus;

	/* Initialize num_cpus, max_cpu_num */
	topo.num_cpus = 0;
	topo.max_cpu_num = 0;
	for_all_proc_cpus(count_cpus);
	if (!summary_only && topo.num_cpus > 1)
		show_cpu = 1;

	if (debug > 1)
		fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);

	cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
	if (cpus == NULL)
		err(1, "calloc cpus");

	/*
	 * Allocate and initialize cpu_present_set
	 */
	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_present_set == NULL)
		err(3, "CPU_ALLOC");
	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
	for_all_proc_cpus(mark_cpu_present);

	/*
	 * Allocate and initialize cpu_affinity_set
	 */
	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_affinity_set == NULL)
		err(3, "CPU_ALLOC");
	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);

	/*
	 * For online cpus
	 * find max_core_id, max_package_id
	 */
	for (i = 0; i <= topo.max_cpu_num; ++i) {
		int siblings;

		if (cpu_is_not_present(i)) {
			if (debug > 1)
				fprintf(outf, "cpu%d NOT PRESENT\n", i);
			continue;
		}
		cpus[i].core_id = get_core_id(i);
		if (cpus[i].core_id > max_core_id)
			max_core_id = cpus[i].core_id;

		cpus[i].physical_package_id = get_physical_package_id(i);
		if (cpus[i].physical_package_id > max_package_id)
			max_package_id = cpus[i].physical_package_id;

		siblings = get_num_ht_siblings(i);
		if (siblings > max_siblings)
			max_siblings = siblings;
		if (debug > 1)
			fprintf(outf, "cpu %d pkg %d core %d\n",
				i, cpus[i].physical_package_id, cpus[i].core_id);
	}
	topo.num_cores_per_pkg = max_core_id + 1;
	if (debug > 1)
		fprintf(outf, "max_core_id %d, sizing for %d cores per package\n",
			max_core_id, topo.num_cores_per_pkg);
	if (debug && !summary_only && topo.num_cores_per_pkg > 1)
		show_core = 1;

	topo.num_packages = max_package_id + 1;
	if (debug > 1)
		fprintf(outf, "max_package_id %d, sizing for %d packages\n",
			max_package_id, topo.num_packages);
	if (debug && !summary_only && topo.num_packages > 1)
		show_pkg = 1;

	topo.num_threads_per_core = max_siblings;
	if (debug > 1)
		fprintf(outf, "max_siblings %d\n", max_siblings);

	free(cpus);
}
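
/*
 * Counters live in flat arrays: one thread_data per thread, one core_data
 * per core, one pkg_data per package, each entry followed by the extra
 * bytes reserved for counters added with --add.
 */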
void
allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
{
	int i;

	*t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
		topo.num_packages, sizeof(struct thread_data) + sys.thread_counter_bytes);
	if (*t == NULL)
		goto error;

	for (i = 0; i < topo.num_threads_per_core *
		topo.num_cores_per_pkg * topo.num_packages; i++)
		(*t)[i].cpu_id = -1;

	*c = calloc(topo.num_cores_per_pkg * topo.num_packages,
		sizeof(struct core_data) + sys.core_counter_bytes);
	if (*c == NULL)
		goto error;

	for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
		(*c)[i].core_id = -1;

	*p = calloc(topo.num_packages, sizeof(struct pkg_data) + sys.package_counter_bytes);
	if (*p == NULL)
		goto error;

	for (i = 0; i < topo.num_packages; i++)
		(*p)[i].package_id = i;

	return;
error:
	err(1, "calloc counters");
}

/*
 * init_counter()
 *
 * set cpu_id, core_num, pkg_num
 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
 *
 * increment topo.num_cores when 1st core in pkg seen
 */
void init_counter(struct thread_data *thread_base, struct core_data *core_base,
	struct pkg_data *pkg_base, int thread_num, int core_num,
	int pkg_num, int cpu_id)
{
	struct thread_data *t;
	struct core_data *c;
	struct pkg_data *p;

	t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
	c = GET_CORE(core_base, core_num, pkg_num);
	p = GET_PKG(pkg_base, pkg_num);

	t->cpu_id = cpu_id;
	if (thread_num == 0) {
		t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
		if (cpu_is_first_core_in_package(cpu_id))
			t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
	}

	c->core_id = core_num;
	p->package_id = pkg_num;
}

int initialize_counters(int cpu_id)
{
	int my_thread_id, my_core_id, my_package_id;

	my_package_id = get_physical_package_id(cpu_id);
	my_core_id = get_core_id(cpu_id);
	my_thread_id = get_cpu_position_in_core(cpu_id);
	if (!my_thread_id)
		topo.num_cores++;

	init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
	init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
	return 0;
}
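
/* size the output buffer at roughly 1 kB of formatted text per CPU, plus one spare block */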
void allocate_output_buffer()
{
	output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
	outp = output_buffer;
	if (outp == NULL)
		err(-1, "calloc output buffer");
}

void allocate_fd_percpu(void)
{
	fd_percpu = calloc(topo.max_cpu_num + 1, sizeof(int));
	if (fd_percpu == NULL)
		err(-1, "calloc fd_percpu");
}

void allocate_irq_buffers(void)
{
	irq_column_2_cpu = calloc(topo.num_cpus, sizeof(int));
	if (irq_column_2_cpu == NULL)
		err(-1, "calloc %d", topo.num_cpus);

	irqs_per_cpu = calloc(topo.max_cpu_num + 1, sizeof(int));
	if (irqs_per_cpu == NULL)
		err(-1, "calloc %d", topo.max_cpu_num + 1);
}

void setup_all_buffers(void)
{
	topology_probe();
	allocate_irq_buffers();
	allocate_fd_percpu();
	allocate_counters(&thread_even, &core_even, &package_even);
	allocate_counters(&thread_odd, &core_odd, &package_odd);
	allocate_output_buffer();
	for_all_proc_cpus(initialize_counters);
}

void set_base_cpu(void)
{
	base_cpu = sched_getcpu();
	if (base_cpu < 0)
		err(-ENODEV, "No valid cpus found");

	if (debug > 1)
		fprintf(outf, "base_cpu = %d\n", base_cpu);
}

void turbostat_init()
{
	setup_all_buffers();
	set_base_cpu();
	check_dev_msr();
	check_permissions();
	process_cpuid();

	if (debug)
		for_all_cpus(print_hwp, ODD_COUNTERS);

	if (debug)
		for_all_cpus(print_epb, ODD_COUNTERS);

	if (debug)
		for_all_cpus(print_perf_limit, ODD_COUNTERS);

	if (debug)
		for_all_cpus(print_rapl, ODD_COUNTERS);

	for_all_cpus(set_temperature_target, ODD_COUNTERS);

	if (debug)
		for_all_cpus(print_thermal, ODD_COUNTERS);

	if (debug && do_irtl_snb)
		print_irtl();
}
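
/*
 * fork_it() takes an initial (EVEN) snapshot of all counters, forks and
 * waits for COMMAND to finish, takes a second (ODD) snapshot, and prints
 * the deltas over the child's run time.
 */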
int fork_it(char **argv)
{
	pid_t child_pid;
	int status;

	status = for_all_cpus(get_counters, EVEN_COUNTERS);
	if (status)
		exit(status);
	/* clear affinity side-effect of get_counters() */
	sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
	gettimeofday(&tv_even, (struct timezone *)NULL);

	child_pid = fork();
	if (!child_pid) {
		/* child */
		execvp(argv[0], argv);
	} else {

		/* parent */
		if (child_pid == -1)
			err(1, "fork");

		signal(SIGINT, SIG_IGN);
		signal(SIGQUIT, SIG_IGN);
		if (waitpid(child_pid, &status, 0) == -1)
			err(status, "waitpid");
	}
	/*
	 * n.b. fork_it() does not check for errors from for_all_cpus()
	 * because re-starting is problematic when forking
	 */
	for_all_cpus(get_counters, ODD_COUNTERS);
	gettimeofday(&tv_odd, (struct timezone *)NULL);
	timersub(&tv_odd, &tv_even, &tv_delta);
	if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS))
		fprintf(outf, "%s: Counter reset detected\n", progname);
	else {
		compute_average(EVEN_COUNTERS);
		format_all_counters(EVEN_COUNTERS);
	}

	fprintf(outf, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);

	flush_output_stderr();

	return status;
}

int get_and_dump_counters(void)
{
	int status;

	status = for_all_cpus(get_counters, ODD_COUNTERS);
	if (status)
		return status;

	status = for_all_cpus(dump_counters, ODD_COUNTERS);
	if (status)
		return status;

	flush_output_stdout();

	return status;
}

void print_version() {
	fprintf(outf, "turbostat version 4.16 24 Dec 2016"
		" - Len Brown <lenb@kernel.org>\n");
}
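
/*
 * add_counter() prepends a new counter to the per-scope list
 * (sys.tp / sys.cp / sys.pp) and reserves room for its value in the
 * corresponding thread/core/package counter area.
 */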
int add_counter(unsigned int msr_num, char *name, unsigned int width,
	enum counter_scope scope, enum counter_type type,
	enum counter_format format)
{
	struct msr_counter *msrp;

	msrp = calloc(1, sizeof(struct msr_counter));
	if (msrp == NULL) {
		perror("calloc");
		exit(1);
	}

	msrp->msr_num = msr_num;
	strncpy(msrp->name, name, NAME_BYTES);
	msrp->width = width;
	msrp->type = type;
	msrp->format = format;

	switch (scope) {
	case SCOPE_CPU:
		sys.thread_counter_bytes += 64;
		msrp->next = sys.tp;
		sys.tp = msrp;
		sys.thread_counter_bytes += sizeof(unsigned long long);
		break;
	case SCOPE_CORE:
		sys.core_counter_bytes += 64;
		msrp->next = sys.cp;
		sys.cp = msrp;
		sys.core_counter_bytes += sizeof(unsigned long long);
		break;
	case SCOPE_PACKAGE:
		sys.package_counter_bytes += 64;
		msrp->next = sys.pp;
		sys.pp = msrp;
		sys.package_counter_bytes += sizeof(unsigned long long);
		break;
	}

	return 0;
}
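
/*
 * parse_add_command() walks the comma-separated --add argument,
 * e.g. "msr0x10,u64,cpu,delta,MY_TSC": an MSR number plus optional
 * width, scope, type, format, and column-name tokens.
 */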
void parse_add_command(char *add_command)
{
	int msr_num = 0;
	char name_buffer[NAME_BYTES] = "";	/* empty until a column name token is seen */
	int width = 64;
	int fail = 0;
	enum counter_scope scope = SCOPE_CPU;
	enum counter_type type = COUNTER_CYCLES;
	enum counter_format format = FORMAT_DELTA;

	while (add_command) {

		if (sscanf(add_command, "msr0x%x", &msr_num) == 1)
			goto next;

		if (sscanf(add_command, "msr%d", &msr_num) == 1)
			goto next;

		if (sscanf(add_command, "u%d", &width) == 1) {
			if ((width == 32) || (width == 64))
				goto next;
			width = 64;
		}
		if (!strncmp(add_command, "cpu", strlen("cpu"))) {
			scope = SCOPE_CPU;
			goto next;
		}
		if (!strncmp(add_command, "core", strlen("core"))) {
			scope = SCOPE_CORE;
			goto next;
		}
		if (!strncmp(add_command, "package", strlen("package"))) {
			scope = SCOPE_PACKAGE;
			goto next;
		}
		if (!strncmp(add_command, "cycles", strlen("cycles"))) {
			type = COUNTER_CYCLES;
			goto next;
		}
		if (!strncmp(add_command, "seconds", strlen("seconds"))) {
			type = COUNTER_SECONDS;
			goto next;
		}
		if (!strncmp(add_command, "raw", strlen("raw"))) {
			format = FORMAT_RAW;
			goto next;
		}
		if (!strncmp(add_command, "delta", strlen("delta"))) {
			format = FORMAT_DELTA;
			goto next;
		}
		if (!strncmp(add_command, "percent", strlen("percent"))) {
			format = FORMAT_PERCENT;
			goto next;
		}

		if (sscanf(add_command, "%18s,%*s", name_buffer) == 1) {	/* 18 < NAME_BYTES */
			char *eos;

			eos = strchr(name_buffer, ',');
			if (eos)
				*eos = '\0';
			goto next;
		}

next:
		add_command = strchr(add_command, ',');
		if (add_command)
			add_command++;

	}
	if (msr_num == 0) {
		fprintf(stderr, "--add: (msrDDD | msr0xXXX) required\n");
		fail++;
	}

	/* generate default column header */
	if (*name_buffer == '\0') {
		if (format == FORMAT_RAW) {
			if (width == 32)
				sprintf(name_buffer, "msr%d", msr_num);
			else
				sprintf(name_buffer, "MSR%d", msr_num);
		} else if (format == FORMAT_DELTA) {
			if (width == 32)
				sprintf(name_buffer, "cnt%d", msr_num);
			else
				sprintf(name_buffer, "CNT%d", msr_num);
		} else if (format == FORMAT_PERCENT) {
			if (width == 32)
				sprintf(name_buffer, "msr%d%%", msr_num);
			else
				sprintf(name_buffer, "MSR%d%%", msr_num);
		}
	}

	if (add_counter(msr_num, name_buffer, width, scope, type, format))
		fail++;

	if (fail) {
		help();
		exit(1);
	}
}
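
/*
 * getopt_long_only() is used below, so each long option may also be given
 * with a single leading dash, e.g. "-interval 10".
 */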
void cmdline(int argc, char **argv)
{
	int opt;
	int option_index = 0;
	static struct option long_options[] = {
		{"add",		required_argument,	0, 'a'},
		{"Dump",	no_argument,		0, 'D'},
		{"debug",	no_argument,		0, 'd'},
		{"interval",	required_argument,	0, 'i'},
		{"help",	no_argument,		0, 'h'},
		{"Joules",	no_argument,		0, 'J'},
		{"out",		required_argument,	0, 'o'},
		{"Package",	no_argument,		0, 'p'},
		{"processor",	no_argument,		0, 'p'},
		{"Summary",	no_argument,		0, 'S'},
		{"TCC",		required_argument,	0, 'T'},
		{"version",	no_argument,		0, 'v' },
		{0,		0,			0,  0 }
	};

	progname = argv[0];

	while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:PpST:v",
				long_options, &option_index)) != -1) {
		switch (opt) {
		case 'a':
			parse_add_command(optarg);
			break;
		case 'D':
			dump_only++;
			break;
		case 'd':
			debug++;
			break;
		case 'h':
		default:
			help();
			exit(1);
		case 'i':
			{
				double interval = strtod(optarg, NULL);

				if (interval < 0.001) {
					fprintf(outf, "interval %f seconds is too small\n",
						interval);
					exit(2);
				}

				interval_ts.tv_sec = interval;
				interval_ts.tv_nsec = (interval - interval_ts.tv_sec) * 1000000000;
			}
			break;
		case 'J':
			rapl_joules++;
			break;
		case 'o':
			outf = fopen_or_die(optarg, "w");
			break;
		case 'P':
			show_pkg_only++;
			break;
		case 'p':
			show_core_only++;
			break;
		case 'S':
			summary_only++;
			break;
		case 'T':
			tcc_activation_temp_override = atoi(optarg);
			break;
		case 'v':
			print_version();
			exit(0);
			break;
		}
	}
}

int main(int argc, char **argv)
{
	outf = stderr;

	cmdline(argc, argv);

	if (debug)
		print_version();

	turbostat_init();

	/* dump counters and exit */
	if (dump_only)
		return get_and_dump_counters();

	/*
	 * if any params left, it must be a command to fork
	 */
	if (argc - optind)
		return fork_it(argv + optind);
	else
		turbostat_loop();

	return 0;
}