tlb_uv.c

  1. /*
  2. * SGI UltraViolet TLB flush routines.
  3. *
  4. * (c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
  5. *
  6. * This code is released under the GNU General Public License version 2 or
  7. * later.
  8. */
  9. #include <linux/seq_file.h>
  10. #include <linux/proc_fs.h>
  11. #include <linux/debugfs.h>
  12. #include <linux/kernel.h>
  13. #include <linux/slab.h>
  14. #include <linux/delay.h>
  15. #include <asm/mmu_context.h>
  16. #include <asm/uv/uv.h>
  17. #include <asm/uv/uv_mmrs.h>
  18. #include <asm/uv/uv_hub.h>
  19. #include <asm/uv/uv_bau.h>
  20. #include <asm/apic.h>
  21. #include <asm/tsc.h>
  22. #include <asm/irq_vectors.h>
  23. #include <asm/timer.h>
  24. static struct bau_operations ops __ro_after_init;
  25. /* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
  26. static int timeout_base_ns[] = {
  27. 20,
  28. 160,
  29. 1280,
  30. 10240,
  31. 81920,
  32. 655360,
  33. 5242880,
  34. 167772160
  35. };
  36. static int timeout_us;
  37. static bool nobau = true;
  38. static int nobau_perm;
  39. static cycles_t congested_cycles;
  40. /* tunables: */
  41. static int max_concurr = MAX_BAU_CONCURRENT;
  42. static int max_concurr_const = MAX_BAU_CONCURRENT;
  43. static int plugged_delay = PLUGGED_DELAY;
  44. static int plugsb4reset = PLUGSB4RESET;
  45. static int giveup_limit = GIVEUP_LIMIT;
  46. static int timeoutsb4reset = TIMEOUTSB4RESET;
  47. static int ipi_reset_limit = IPI_RESET_LIMIT;
  48. static int complete_threshold = COMPLETE_THRESHOLD;
  49. static int congested_respns_us = CONGESTED_RESPONSE_US;
  50. static int congested_reps = CONGESTED_REPS;
  51. static int disabled_period = DISABLED_PERIOD;
  52. static struct tunables tunables[] = {
  53. {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
  54. {&plugged_delay, PLUGGED_DELAY},
  55. {&plugsb4reset, PLUGSB4RESET},
  56. {&timeoutsb4reset, TIMEOUTSB4RESET},
  57. {&ipi_reset_limit, IPI_RESET_LIMIT},
  58. {&complete_threshold, COMPLETE_THRESHOLD},
  59. {&congested_respns_us, CONGESTED_RESPONSE_US},
  60. {&congested_reps, CONGESTED_REPS},
  61. {&disabled_period, DISABLED_PERIOD},
  62. {&giveup_limit, GIVEUP_LIMIT}
  63. };
  64. static struct dentry *tunables_dir;
  65. static struct dentry *tunables_file;
  66. /* these correspond to the statistics printed by ptc_seq_show() */
  67. static char *stat_description[] = {
  68. "sent: number of shootdown messages sent",
  69. "stime: time spent sending messages",
  70. "numuvhubs: number of hubs targeted with shootdown",
  71. "numuvhubs16: number times 16 or more hubs targeted",
  72. "numuvhubs8: number times 8 or more hubs targeted",
  73. "numuvhubs4: number times 4 or more hubs targeted",
  74. "numuvhubs2: number times 2 or more hubs targeted",
  75. "numuvhubs1: number times 1 hub targeted",
  76. "numcpus: number of cpus targeted with shootdown",
  77. "dto: number of destination timeouts",
  78. "retries: destination timeout retries sent",
  79. "rok: : destination timeouts successfully retried",
  80. "resetp: ipi-style resource resets for plugs",
  81. "resett: ipi-style resource resets for timeouts",
  82. "giveup: fall-backs to ipi-style shootdowns",
  83. "sto: number of source timeouts",
  84. "bz: number of stay-busy's",
  85. "throt: number times spun in throttle",
  86. "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
  87. "recv: shootdown messages received",
  88. "rtime: time spent processing messages",
  89. "all: shootdown all-tlb messages",
  90. "one: shootdown one-tlb messages",
  91. "mult: interrupts that found multiple messages",
  92. "none: interrupts that found no messages",
  93. "retry: number of retry messages processed",
  94. "canc: number messages canceled by retries",
  95. "nocan: number retries that found nothing to cancel",
  96. "reset: number of ipi-style reset requests processed",
  97. "rcan: number messages canceled by reset requests",
  98. "disable: number times use of the BAU was disabled",
  99. "enable: number times use of the BAU was re-enabled"
  100. };
  101. static int __init setup_bau(char *arg)
  102. {
  103. int result;
  104. if (!arg)
  105. return -EINVAL;
  106. result = strtobool(arg, &nobau);
  107. if (result)
  108. return result;
  109. /* we need to flip the logic here, so that bau=y sets nobau to false */
  110. nobau = !nobau;
  111. if (!nobau)
  112. pr_info("UV BAU Enabled\n");
  113. else
  114. pr_info("UV BAU Disabled\n");
  115. return 0;
  116. }
  117. early_param("bau", setup_bau);
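/*
 * Illustrative usage note (not from the original source): nobau defaults to
 * true above, so the BAU stays off unless the kernel is booted with "bau=1"
 * (or another strtobool() true value such as "y"); "bau=0" keeps it
 * disabled explicitly.
 */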
  118. /* base pnode in this partition */
  119. static int uv_base_pnode __read_mostly;
  120. static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
  121. static DEFINE_PER_CPU(struct bau_control, bau_control);
  122. static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
  123. static void
  124. set_bau_on(void)
  125. {
  126. int cpu;
  127. struct bau_control *bcp;
  128. if (nobau_perm) {
  129. pr_info("BAU not initialized; cannot be turned on\n");
  130. return;
  131. }
  132. nobau = false;
  133. for_each_present_cpu(cpu) {
  134. bcp = &per_cpu(bau_control, cpu);
  135. bcp->nobau = false;
  136. }
  137. pr_info("BAU turned on\n");
  138. return;
  139. }
  140. static void
  141. set_bau_off(void)
  142. {
  143. int cpu;
  144. struct bau_control *bcp;
  145. nobau = true;
  146. for_each_present_cpu(cpu) {
  147. bcp = &per_cpu(bau_control, cpu);
  148. bcp->nobau = true;
  149. }
  150. pr_info("BAU turned off\n");
  151. return;
  152. }
  153. /*
  154. * Determine the first node on a uvhub. 'Nodes' are used for kernel
  155. * memory allocation.
  156. */
  157. static int __init uvhub_to_first_node(int uvhub)
  158. {
  159. int node, b;
  160. for_each_online_node(node) {
  161. b = uv_node_to_blade_id(node);
  162. if (uvhub == b)
  163. return node;
  164. }
  165. return -1;
  166. }
  167. /*
  168. * Determine the apicid of the first cpu on a uvhub.
  169. */
  170. static int __init uvhub_to_first_apicid(int uvhub)
  171. {
  172. int cpu;
  173. for_each_present_cpu(cpu)
  174. if (uvhub == uv_cpu_to_blade_id(cpu))
  175. return per_cpu(x86_cpu_to_apicid, cpu);
  176. return -1;
  177. }
  178. /*
  179. * Free a software acknowledge hardware resource by clearing its Pending
  180. * bit. This will return a reply to the sender.
  181. * If the message has timed out, a reply has already been sent by the
  182. * hardware but the resource has not been released. In that case our
  183. * clear of the Timeout bit (as well) will free the resource. No reply will
  184. * be sent (the hardware will only do one reply per message).
  185. */
  186. static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
  187. int do_acknowledge)
  188. {
  189. unsigned long dw;
  190. struct bau_pq_entry *msg;
  191. msg = mdp->msg;
  192. if (!msg->canceled && do_acknowledge) {
  193. dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
  194. ops.write_l_sw_ack(dw);
  195. }
  196. msg->replied_to = 1;
  197. msg->swack_vec = 0;
  198. }
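/*
 * Illustrative sketch, not part of the original driver: the value written
 * by reply_to_message() clears both halves of the software-ack register for
 * this message's resource. The low bits are the Pending bits and the bits
 * UV_SW_ACK_NPENDING places higher are the Timeout bits, so one write frees
 * the resource whether or not it has already timed out.
 */
#if 0	/* example only; 'vec' is a hypothetical swack_vec value */
static inline unsigned long swack_clear_value(unsigned long vec)
{
	/* e.g. vec = 0x04 clears Pending bit 2 and Timeout bit 2 + NPENDING */
	return (vec << UV_SW_ACK_NPENDING) | vec;
}
#endif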
  199. /*
  200. * Process the receipt of a RETRY message
  201. */
  202. static void bau_process_retry_msg(struct msg_desc *mdp,
  203. struct bau_control *bcp)
  204. {
  205. int i;
  206. int cancel_count = 0;
  207. unsigned long msg_res;
  208. unsigned long mmr = 0;
  209. struct bau_pq_entry *msg = mdp->msg;
  210. struct bau_pq_entry *msg2;
  211. struct ptc_stats *stat = bcp->statp;
  212. stat->d_retries++;
  213. /*
  214. * cancel any message from msg+1 to the retry itself
  215. */
  216. for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
  217. if (msg2 > mdp->queue_last)
  218. msg2 = mdp->queue_first;
  219. if (msg2 == msg)
  220. break;
  221. /* same conditions for cancellation as do_reset */
  222. if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
  223. (msg2->swack_vec) && ((msg2->swack_vec &
  224. msg->swack_vec) == 0) &&
  225. (msg2->sending_cpu == msg->sending_cpu) &&
  226. (msg2->msg_type != MSG_NOOP)) {
  227. mmr = ops.read_l_sw_ack();
  228. msg_res = msg2->swack_vec;
  229. /*
  230. * This is a message retry; clear the resources held
  231. * by the previous message only if they timed out.
  232. * If it has not timed out we have an unexpected
  233. * situation to report.
  234. */
  235. if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
  236. unsigned long mr;
  237. /*
  238. * Is the resource timed out?
  239. * Make everyone ignore the cancelled message.
  240. */
  241. msg2->canceled = 1;
  242. stat->d_canceled++;
  243. cancel_count++;
  244. mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
  245. ops.write_l_sw_ack(mr);
  246. }
  247. }
  248. }
  249. if (!cancel_count)
  250. stat->d_nocanceled++;
  251. }
  252. /*
  253. * Do all the things a cpu should do for a TLB shootdown message.
  254. * Other cpu's may come here at the same time for this message.
  255. */
  256. static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
  257. int do_acknowledge)
  258. {
  259. short socket_ack_count = 0;
  260. short *sp;
  261. struct atomic_short *asp;
  262. struct ptc_stats *stat = bcp->statp;
  263. struct bau_pq_entry *msg = mdp->msg;
  264. struct bau_control *smaster = bcp->socket_master;
  265. /*
  266. * This must be a normal message, or retry of a normal message
  267. */
  268. if (msg->address == TLB_FLUSH_ALL) {
  269. local_flush_tlb();
  270. stat->d_alltlb++;
  271. } else {
  272. __flush_tlb_one(msg->address);
  273. stat->d_onetlb++;
  274. }
  275. stat->d_requestee++;
  276. /*
  277. * One cpu on each uvhub has the additional job on a RETRY
  278. * of releasing the resource held by the message that is
  279. * being retried. That message is identified by sending
  280. * cpu number.
  281. */
  282. if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
  283. bau_process_retry_msg(mdp, bcp);
  284. /*
  285. * This is a swack message, so we have to reply to it.
  286. * Count each responding cpu on the socket. This avoids
  287. * pinging the count's cache line back and forth between
  288. * the sockets.
  289. */
  290. sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
  291. asp = (struct atomic_short *)sp;
  292. socket_ack_count = atom_asr(1, asp);
  293. if (socket_ack_count == bcp->cpus_in_socket) {
  294. int msg_ack_count;
  295. /*
  296. * Both sockets dump their completed count total into
  297. * the message's count.
  298. */
  299. *sp = 0;
  300. asp = (struct atomic_short *)&msg->acknowledge_count;
  301. msg_ack_count = atom_asr(socket_ack_count, asp);
  302. if (msg_ack_count == bcp->cpus_in_uvhub) {
  303. /*
  304. * All cpus in uvhub saw it; reply
  305. * (unless we are in the UV2 workaround)
  306. */
  307. reply_to_message(mdp, bcp, do_acknowledge);
  308. }
  309. }
  310. return;
  311. }
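/*
 * Illustrative sketch of the two-level acknowledge counting above (not part
 * of the original driver), assuming atom_asr() returns the post-add value as
 * defined in uv_bau.h. Every CPU bumps a per-socket counter, only the last
 * CPU on each socket touches the per-message counter, and only the last CPU
 * on the whole uvhub sends the reply, which keeps the hot count off the
 * cross-socket cache lines.
 */
#if 0	/* example only */
static int last_cpu_on_hub(short *socket_count, short *msg_count,
			   int cpus_in_socket, int cpus_in_uvhub)
{
	if (atom_asr(1, (struct atomic_short *)socket_count) != cpus_in_socket)
		return 0;		/* not the last CPU on this socket */
	*socket_count = 0;		/* reset for the next message */
	/* fold this socket's total into the per-message count */
	return atom_asr(cpus_in_socket,
			(struct atomic_short *)msg_count) == cpus_in_uvhub;
}
#endif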
  312. /*
  313. * Determine the first cpu on a pnode.
  314. */
  315. static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
  316. {
  317. int cpu;
  318. struct hub_and_pnode *hpp;
  319. for_each_present_cpu(cpu) {
  320. hpp = &smaster->thp[cpu];
  321. if (pnode == hpp->pnode)
  322. return cpu;
  323. }
  324. return -1;
  325. }
  326. /*
  327. * Last resort when we get a large number of destination timeouts is
  328. * to clear resources held by a given cpu.
  329. * Do this with IPI so that all messages in the BAU message queue
  330. * can be identified by their nonzero swack_vec field.
  331. *
  332. * This is entered for a single cpu on the uvhub.
  333. * The sender wants this uvhub to free a specific message's
  334. * swack resources.
  335. */
  336. static void do_reset(void *ptr)
  337. {
  338. int i;
  339. struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
  340. struct reset_args *rap = (struct reset_args *)ptr;
  341. struct bau_pq_entry *msg;
  342. struct ptc_stats *stat = bcp->statp;
  343. stat->d_resets++;
  344. /*
  345. * We're looking for the given sender, and
  346. * will free its swack resource.
  347. * If all cpu's finally responded after the timeout, its
  348. * message 'replied_to' was set.
  349. */
  350. for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
  351. unsigned long msg_res;
  352. /* do_reset: same conditions for cancellation as
  353. bau_process_retry_msg() */
  354. if ((msg->replied_to == 0) &&
  355. (msg->canceled == 0) &&
  356. (msg->sending_cpu == rap->sender) &&
  357. (msg->swack_vec) &&
  358. (msg->msg_type != MSG_NOOP)) {
  359. unsigned long mmr;
  360. unsigned long mr;
  361. /*
  362. * make everyone else ignore this message
  363. */
  364. msg->canceled = 1;
  365. /*
  366. * only reset the resource if it is still pending
  367. */
  368. mmr = ops.read_l_sw_ack();
  369. msg_res = msg->swack_vec;
  370. mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
  371. if (mmr & msg_res) {
  372. stat->d_rcanceled++;
  373. ops.write_l_sw_ack(mr);
  374. }
  375. }
  376. }
  377. return;
  378. }
  379. /*
  380. * Use IPI to get all target uvhubs to release resources held by
  381. * a given sending cpu number.
  382. */
  383. static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
  384. {
  385. int pnode;
  386. int apnode;
  387. int maskbits;
  388. int sender = bcp->cpu;
  389. cpumask_t *mask = bcp->uvhub_master->cpumask;
  390. struct bau_control *smaster = bcp->socket_master;
  391. struct reset_args reset_args;
  392. reset_args.sender = sender;
  393. cpumask_clear(mask);
  394. /* find a single cpu for each uvhub in this distribution mask */
  395. maskbits = sizeof(struct pnmask) * BITSPERBYTE;
  396. /* each bit is a pnode relative to the partition base pnode */
  397. for (pnode = 0; pnode < maskbits; pnode++) {
  398. int cpu;
  399. if (!bau_uvhub_isset(pnode, distribution))
  400. continue;
  401. apnode = pnode + bcp->partition_base_pnode;
  402. cpu = pnode_to_first_cpu(apnode, smaster);
  403. cpumask_set_cpu(cpu, mask);
  404. }
  405. /* IPI all cpus; preemption is already disabled */
  406. smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
  407. return;
  408. }
  409. /*
  410. * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
  411. * number, not an absolute. It converts a duration in cycles to a duration in
  412. * ns.
  413. */
  414. static inline unsigned long long cycles_2_ns(unsigned long long cyc)
  415. {
  416. struct cyc2ns_data *data = cyc2ns_read_begin();
  417. unsigned long long ns;
  418. ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
  419. cyc2ns_read_end(data);
  420. return ns;
  421. }
  422. /*
  423. * The reverse of the above; converts a duration in ns to a duration in cycles.
  424. */
  425. static inline unsigned long long ns_2_cycles(unsigned long long ns)
  426. {
  427. struct cyc2ns_data *data = cyc2ns_read_begin();
  428. unsigned long long cyc;
  429. cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul;
  430. cyc2ns_read_end(data);
  431. return cyc;
  432. }
  433. static inline unsigned long cycles_2_us(unsigned long long cyc)
  434. {
  435. return cycles_2_ns(cyc) / NSEC_PER_USEC;
  436. }
  437. static inline cycles_t sec_2_cycles(unsigned long sec)
  438. {
  439. return ns_2_cycles(sec * NSEC_PER_SEC);
  440. }
  441. static inline unsigned long long usec_2_cycles(unsigned long usec)
  442. {
  443. return ns_2_cycles(usec * NSEC_PER_USEC);
  444. }
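/*
 * Worked example (the scale factors are hypothetical): the conversions above
 * are fixed-point arithmetic, ns = (cyc * cyc2ns_mul) >> cyc2ns_shift. With
 * cyc2ns_mul = 410 and cyc2ns_shift = 10 (roughly a 2.5 GHz TSC),
 * 1000 cycles -> (1000 * 410) >> 10 = 400 ns, and ns_2_cycles() inverts it
 * as (400 << 10) / 410 = 999 cycles; integer truncation loses a little.
 */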
  445. /*
  446. * wait for all cpus on this hub to finish their sends and go quiet
  447. * leaves uvhub_quiesce set so that no new broadcasts are started by
  448. * bau_flush_send_and_wait()
  449. */
  450. static inline void quiesce_local_uvhub(struct bau_control *hmaster)
  451. {
  452. atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
  453. }
  454. /*
  455. * mark this quiet-requestor as done
  456. */
  457. static inline void end_uvhub_quiesce(struct bau_control *hmaster)
  458. {
  459. atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
  460. }
  461. static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
  462. {
  463. unsigned long descriptor_status;
  464. descriptor_status = uv_read_local_mmr(mmr_offset);
  465. descriptor_status >>= right_shift;
  466. descriptor_status &= UV_ACT_STATUS_MASK;
  467. return descriptor_status;
  468. }
  469. /*
  470. * Wait for completion of a broadcast software ack message
  471. * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
  472. */
  473. static int uv1_wait_completion(struct bau_desc *bau_desc,
  474. struct bau_control *bcp, long try)
  475. {
  476. unsigned long descriptor_status;
  477. cycles_t ttm;
  478. u64 mmr_offset = bcp->status_mmr;
  479. int right_shift = bcp->status_index;
  480. struct ptc_stats *stat = bcp->statp;
  481. descriptor_status = uv1_read_status(mmr_offset, right_shift);
  482. /* spin on the status MMR, waiting for it to go idle */
  483. while ((descriptor_status != DS_IDLE)) {
  484. /*
  485. * Our software ack messages may be blocked because
  486. * there are no swack resources available. As long
  487. * as none of them has timed out hardware will NACK
  488. * our message and its state will stay IDLE.
  489. */
  490. if (descriptor_status == DS_SOURCE_TIMEOUT) {
  491. stat->s_stimeout++;
  492. return FLUSH_GIVEUP;
  493. } else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
  494. stat->s_dtimeout++;
  495. ttm = get_cycles();
  496. /*
  497. * Our retries may be blocked by all destination
  498. * swack resources being consumed, and a timeout
  499. * pending. In that case hardware returns the
  500. * ERROR that looks like a destination timeout.
  501. */
  502. if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
  503. bcp->conseccompletes = 0;
  504. return FLUSH_RETRY_PLUGGED;
  505. }
  506. bcp->conseccompletes = 0;
  507. return FLUSH_RETRY_TIMEOUT;
  508. } else {
  509. /*
  510. * descriptor_status is still BUSY
  511. */
  512. cpu_relax();
  513. }
  514. descriptor_status = uv1_read_status(mmr_offset, right_shift);
  515. }
  516. bcp->conseccompletes++;
  517. return FLUSH_COMPLETE;
  518. }
  519. /*
  520. * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2
  521. * register, but it is not currently used.
  522. */
  523. static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
  524. {
  525. return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
  526. }
  527. /*
  528. * Return whether the status of the descriptor that is normally used for this
  529. * cpu (the one indexed by its hub-relative cpu number) is busy.
  530. * The status of the original 32 descriptors is always reflected in the 64
  531. * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
  532. * The bit provided by the activation_status_2 register is irrelevant to
  533. * the status if it is only being tested for busy or not busy.
  534. */
  535. int normal_busy(struct bau_control *bcp)
  536. {
  537. int cpu = bcp->uvhub_cpu;
  538. int mmr_offset;
  539. int right_shift;
  540. mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
  541. right_shift = cpu * UV_ACT_STATUS_SIZE;
  542. return (((((read_lmmr(mmr_offset) >> right_shift) &
  543. UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
  544. }
  545. /*
  546. * Entered when a bau descriptor has gone into a permanent busy wait because
  547. * of a hardware bug.
  548. * Workaround the bug.
  549. */
  550. int handle_uv2_busy(struct bau_control *bcp)
  551. {
  552. struct ptc_stats *stat = bcp->statp;
  553. stat->s_uv2_wars++;
  554. bcp->busy = 1;
  555. return FLUSH_GIVEUP;
  556. }
  557. static int uv2_3_wait_completion(struct bau_desc *bau_desc,
  558. struct bau_control *bcp, long try)
  559. {
  560. unsigned long descriptor_stat;
  561. cycles_t ttm;
  562. u64 mmr_offset = bcp->status_mmr;
  563. int right_shift = bcp->status_index;
  564. int desc = bcp->uvhub_cpu;
  565. long busy_reps = 0;
  566. struct ptc_stats *stat = bcp->statp;
  567. descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
  568. /* spin on the status MMR, waiting for it to go idle */
  569. while (descriptor_stat != UV2H_DESC_IDLE) {
  570. if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
  571. /*
  572. * A h/w bug on the destination side may
  573. * have prevented the message being marked
  574. * pending, thus it doesn't get replied to
  575. * and gets continually nacked until it times
  576. * out with a SOURCE_TIMEOUT.
  577. */
  578. stat->s_stimeout++;
  579. return FLUSH_GIVEUP;
  580. } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
  581. ttm = get_cycles();
  582. /*
  583. * Our retries may be blocked by all destination
  584. * swack resources being consumed, and a timeout
  585. * pending. In that case hardware returns the
  586. * ERROR that looks like a destination timeout.
  587. * Without using the extended status we have to
  588. * deduce from the short time that this was a
  589. * strong nack.
  590. */
  591. if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
  592. bcp->conseccompletes = 0;
  593. stat->s_plugged++;
  594. /* FLUSH_RETRY_PLUGGED causes hang on boot */
  595. return FLUSH_GIVEUP;
  596. }
  597. stat->s_dtimeout++;
  598. bcp->conseccompletes = 0;
  599. /* FLUSH_RETRY_TIMEOUT causes hang on boot */
  600. return FLUSH_GIVEUP;
  601. } else {
  602. busy_reps++;
  603. if (busy_reps > 1000000) {
  604. /* not to hammer on the clock */
  605. busy_reps = 0;
  606. ttm = get_cycles();
  607. if ((ttm - bcp->send_message) > bcp->timeout_interval)
  608. return handle_uv2_busy(bcp);
  609. }
  610. /*
  611. * descriptor_stat is still BUSY
  612. */
  613. cpu_relax();
  614. }
  615. descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
  616. }
  617. bcp->conseccompletes++;
  618. return FLUSH_COMPLETE;
  619. }
  620. /*
  621. * Returns the status of the current BAU message for cpu 'desc' as a bit field
  622. * [Error][Busy][Aux]
  623. */
  624. static u64 read_status(u64 status_mmr, int index, int desc)
  625. {
  626. u64 stat;
  627. stat = ((read_lmmr(status_mmr) >> index) & UV_ACT_STATUS_MASK) << 1;
  628. stat |= (read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_2) >> desc) & 0x1;
  629. return stat;
  630. }
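/*
 * Illustrative layout, not part of the original driver: if the two-bit
 * status field read from ACTIVATION_STATUS_0/1 is S and the per-descriptor
 * aux bit from ACTIVATION_STATUS_2 is A, read_status() returns (S << 1) | A,
 * i.e. the [Error][Busy][Aux] bit field named above.
 */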
  631. static int uv4_wait_completion(struct bau_desc *bau_desc,
  632. struct bau_control *bcp, long try)
  633. {
  634. struct ptc_stats *stat = bcp->statp;
  635. u64 descriptor_stat;
  636. u64 mmr = bcp->status_mmr;
  637. int index = bcp->status_index;
  638. int desc = bcp->uvhub_cpu;
  639. descriptor_stat = read_status(mmr, index, desc);
  640. /* spin on the status MMR, waiting for it to go idle */
  641. while (descriptor_stat != UV2H_DESC_IDLE) {
  642. switch (descriptor_stat) {
  643. case UV2H_DESC_SOURCE_TIMEOUT:
  644. stat->s_stimeout++;
  645. return FLUSH_GIVEUP;
  646. case UV2H_DESC_DEST_TIMEOUT:
  647. stat->s_dtimeout++;
  648. bcp->conseccompletes = 0;
  649. return FLUSH_RETRY_TIMEOUT;
  650. case UV2H_DESC_DEST_STRONG_NACK:
  651. stat->s_plugged++;
  652. bcp->conseccompletes = 0;
  653. return FLUSH_RETRY_PLUGGED;
  654. case UV2H_DESC_DEST_PUT_ERR:
  655. bcp->conseccompletes = 0;
  656. return FLUSH_GIVEUP;
  657. default:
  658. /* descriptor_stat is still BUSY */
  659. cpu_relax();
  660. }
  661. descriptor_stat = read_status(mmr, index, desc);
  662. }
  663. bcp->conseccompletes++;
  664. return FLUSH_COMPLETE;
  665. }
  666. /*
  667. * Our retries are blocked by all destination sw ack resources being
  668. * in use, and a timeout is pending. In that case hardware immediately
  669. * returns the ERROR that looks like a destination timeout.
  670. */
  671. static void destination_plugged(struct bau_desc *bau_desc,
  672. struct bau_control *bcp,
  673. struct bau_control *hmaster, struct ptc_stats *stat)
  674. {
  675. udelay(bcp->plugged_delay);
  676. bcp->plugged_tries++;
  677. if (bcp->plugged_tries >= bcp->plugsb4reset) {
  678. bcp->plugged_tries = 0;
  679. quiesce_local_uvhub(hmaster);
  680. spin_lock(&hmaster->queue_lock);
  681. reset_with_ipi(&bau_desc->distribution, bcp);
  682. spin_unlock(&hmaster->queue_lock);
  683. end_uvhub_quiesce(hmaster);
  684. bcp->ipi_attempts++;
  685. stat->s_resets_plug++;
  686. }
  687. }
  688. static void destination_timeout(struct bau_desc *bau_desc,
  689. struct bau_control *bcp, struct bau_control *hmaster,
  690. struct ptc_stats *stat)
  691. {
  692. hmaster->max_concurr = 1;
  693. bcp->timeout_tries++;
  694. if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
  695. bcp->timeout_tries = 0;
  696. quiesce_local_uvhub(hmaster);
  697. spin_lock(&hmaster->queue_lock);
  698. reset_with_ipi(&bau_desc->distribution, bcp);
  699. spin_unlock(&hmaster->queue_lock);
  700. end_uvhub_quiesce(hmaster);
  701. bcp->ipi_attempts++;
  702. stat->s_resets_timeout++;
  703. }
  704. }
  705. /*
  706. * Stop all cpus on a uvhub from using the BAU for a period of time.
  707. * This is reversed by check_enable.
  708. */
  709. static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
  710. {
  711. int tcpu;
  712. struct bau_control *tbcp;
  713. struct bau_control *hmaster;
  714. cycles_t tm1;
  715. hmaster = bcp->uvhub_master;
  716. spin_lock(&hmaster->disable_lock);
  717. if (!bcp->baudisabled) {
  718. stat->s_bau_disabled++;
  719. tm1 = get_cycles();
  720. for_each_present_cpu(tcpu) {
  721. tbcp = &per_cpu(bau_control, tcpu);
  722. if (tbcp->uvhub_master == hmaster) {
  723. tbcp->baudisabled = 1;
  724. tbcp->set_bau_on_time =
  725. tm1 + bcp->disabled_period;
  726. }
  727. }
  728. }
  729. spin_unlock(&hmaster->disable_lock);
  730. }
  731. static void count_max_concurr(int stat, struct bau_control *bcp,
  732. struct bau_control *hmaster)
  733. {
  734. bcp->plugged_tries = 0;
  735. bcp->timeout_tries = 0;
  736. if (stat != FLUSH_COMPLETE)
  737. return;
  738. if (bcp->conseccompletes <= bcp->complete_threshold)
  739. return;
  740. if (hmaster->max_concurr >= hmaster->max_concurr_const)
  741. return;
  742. hmaster->max_concurr++;
  743. }
  744. static void record_send_stats(cycles_t time1, cycles_t time2,
  745. struct bau_control *bcp, struct ptc_stats *stat,
  746. int completion_status, int try)
  747. {
  748. cycles_t elapsed;
  749. if (time2 > time1) {
  750. elapsed = time2 - time1;
  751. stat->s_time += elapsed;
  752. if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
  753. bcp->period_requests++;
  754. bcp->period_time += elapsed;
  755. if ((elapsed > congested_cycles) &&
  756. (bcp->period_requests > bcp->cong_reps) &&
  757. ((bcp->period_time / bcp->period_requests) >
  758. congested_cycles)) {
  759. stat->s_congested++;
  760. disable_for_period(bcp, stat);
  761. }
  762. }
  763. } else
  764. stat->s_requestor--;
  765. if (completion_status == FLUSH_COMPLETE && try > 1)
  766. stat->s_retriesok++;
  767. else if (completion_status == FLUSH_GIVEUP) {
  768. stat->s_giveup++;
  769. if (get_cycles() > bcp->period_end)
  770. bcp->period_giveups = 0;
  771. bcp->period_giveups++;
  772. if (bcp->period_giveups == 1)
  773. bcp->period_end = get_cycles() + bcp->disabled_period;
  774. if (bcp->period_giveups > bcp->giveup_limit) {
  775. disable_for_period(bcp, stat);
  776. stat->s_giveuplimit++;
  777. }
  778. }
  779. }
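/*
 * Illustrative restatement of the congestion test above, not part of the
 * original driver: the BAU is disabled for a period only when the
 * just-completed first-try broadcast was slow *and* the running average
 * over more than cong_reps requests in the period is also slow.
 */
#if 0	/* example only */
static bool looks_congested(cycles_t elapsed, struct bau_control *bcp)
{
	return elapsed > congested_cycles &&
	       bcp->period_requests > bcp->cong_reps &&
	       (bcp->period_time / bcp->period_requests) > congested_cycles;
}
#endif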
  780. /*
  781. * Because of a uv1 hardware bug only a limited number of concurrent
  782. * requests can be made.
  783. */
  784. static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
  785. {
  786. spinlock_t *lock = &hmaster->uvhub_lock;
  787. atomic_t *v;
  788. v = &hmaster->active_descriptor_count;
  789. if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
  790. stat->s_throttles++;
  791. do {
  792. cpu_relax();
  793. } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
  794. }
  795. }
  796. /*
  797. * Handle the completion status of a message send.
  798. */
  799. static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
  800. struct bau_control *bcp, struct bau_control *hmaster,
  801. struct ptc_stats *stat)
  802. {
  803. if (completion_status == FLUSH_RETRY_PLUGGED)
  804. destination_plugged(bau_desc, bcp, hmaster, stat);
  805. else if (completion_status == FLUSH_RETRY_TIMEOUT)
  806. destination_timeout(bau_desc, bcp, hmaster, stat);
  807. }
  808. /*
  809. * Send a broadcast and wait for it to complete.
  810. *
  811. * The flush_mask contains the cpus the broadcast is to be sent to including
  812. * cpus that are on the local uvhub.
  813. *
  814. * Returns 0 if all flushing represented in the mask was done.
  815. * Returns 1 if it gives up entirely and the original cpu mask is to be
  816. * returned to the kernel.
  817. */
  818. int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
  819. struct bau_desc *bau_desc)
  820. {
  821. int seq_number = 0;
  822. int completion_stat = 0;
  823. int uv1 = 0;
  824. long try = 0;
  825. unsigned long index;
  826. cycles_t time1;
  827. cycles_t time2;
  828. struct ptc_stats *stat = bcp->statp;
  829. struct bau_control *hmaster = bcp->uvhub_master;
  830. struct uv1_bau_msg_header *uv1_hdr = NULL;
  831. struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;
  832. if (bcp->uvhub_version == UV_BAU_V1) {
  833. uv1 = 1;
  834. uv1_throttle(hmaster, stat);
  835. }
  836. while (hmaster->uvhub_quiesce)
  837. cpu_relax();
  838. time1 = get_cycles();
  839. if (uv1)
  840. uv1_hdr = &bau_desc->header.uv1_hdr;
  841. else
  842. /* uv2 and uv3 */
  843. uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
  844. do {
  845. if (try == 0) {
  846. if (uv1)
  847. uv1_hdr->msg_type = MSG_REGULAR;
  848. else
  849. uv2_3_hdr->msg_type = MSG_REGULAR;
  850. seq_number = bcp->message_number++;
  851. } else {
  852. if (uv1)
  853. uv1_hdr->msg_type = MSG_RETRY;
  854. else
  855. uv2_3_hdr->msg_type = MSG_RETRY;
  856. stat->s_retry_messages++;
  857. }
  858. if (uv1)
  859. uv1_hdr->sequence = seq_number;
  860. else
  861. uv2_3_hdr->sequence = seq_number;
  862. index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
  863. bcp->send_message = get_cycles();
  864. write_mmr_activation(index);
  865. try++;
  866. completion_stat = ops.wait_completion(bau_desc, bcp, try);
  867. handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
  868. if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
  869. bcp->ipi_attempts = 0;
  870. stat->s_overipilimit++;
  871. completion_stat = FLUSH_GIVEUP;
  872. break;
  873. }
  874. cpu_relax();
  875. } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
  876. (completion_stat == FLUSH_RETRY_TIMEOUT));
  877. time2 = get_cycles();
  878. count_max_concurr(completion_stat, bcp, hmaster);
  879. while (hmaster->uvhub_quiesce)
  880. cpu_relax();
  881. atomic_dec(&hmaster->active_descriptor_count);
  882. record_send_stats(time1, time2, bcp, stat, completion_stat, try);
  883. if (completion_stat == FLUSH_GIVEUP)
  884. /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
  885. return 1;
  886. return 0;
  887. }
  888. /*
  889. * The BAU is disabled for this uvhub. When the disabled time period has
  890. * expired re-enable it.
  891. * Return 0 if it is re-enabled for all cpus on this uvhub.
  892. */
  893. static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
  894. {
  895. int tcpu;
  896. struct bau_control *tbcp;
  897. struct bau_control *hmaster;
  898. hmaster = bcp->uvhub_master;
  899. spin_lock(&hmaster->disable_lock);
  900. if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
  901. stat->s_bau_reenabled++;
  902. for_each_present_cpu(tcpu) {
  903. tbcp = &per_cpu(bau_control, tcpu);
  904. if (tbcp->uvhub_master == hmaster) {
  905. tbcp->baudisabled = 0;
  906. tbcp->period_requests = 0;
  907. tbcp->period_time = 0;
  908. tbcp->period_giveups = 0;
  909. }
  910. }
  911. spin_unlock(&hmaster->disable_lock);
  912. return 0;
  913. }
  914. spin_unlock(&hmaster->disable_lock);
  915. return -1;
  916. }
  917. static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
  918. int remotes, struct bau_desc *bau_desc)
  919. {
  920. stat->s_requestor++;
  921. stat->s_ntargcpu += remotes + locals;
  922. stat->s_ntargremotes += remotes;
  923. stat->s_ntarglocals += locals;
  924. /* uvhub statistics */
  925. hubs = bau_uvhub_weight(&bau_desc->distribution);
  926. if (locals) {
  927. stat->s_ntarglocaluvhub++;
  928. stat->s_ntargremoteuvhub += (hubs - 1);
  929. } else
  930. stat->s_ntargremoteuvhub += hubs;
  931. stat->s_ntarguvhub += hubs;
  932. if (hubs >= 16)
  933. stat->s_ntarguvhub16++;
  934. else if (hubs >= 8)
  935. stat->s_ntarguvhub8++;
  936. else if (hubs >= 4)
  937. stat->s_ntarguvhub4++;
  938. else if (hubs >= 2)
  939. stat->s_ntarguvhub2++;
  940. else
  941. stat->s_ntarguvhub1++;
  942. }
  943. /*
  944. * Translate a cpu mask to the uvhub distribution mask in the BAU
  945. * activation descriptor.
  946. */
  947. static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  948. struct bau_desc *bau_desc, int *localsp, int *remotesp)
  949. {
  950. int cpu;
  951. int pnode;
  952. int cnt = 0;
  953. struct hub_and_pnode *hpp;
  954. for_each_cpu(cpu, flush_mask) {
  955. /*
  956. * The distribution vector is a bit map of pnodes, relative
  957. * to the partition base pnode (and the partition base nasid
  958. * in the header).
  959. * Translate cpu to pnode and hub using a local memory array.
  960. */
  961. hpp = &bcp->socket_master->thp[cpu];
  962. pnode = hpp->pnode - bcp->partition_base_pnode;
  963. bau_uvhub_set(pnode, &bau_desc->distribution);
  964. cnt++;
  965. if (hpp->uvhub == bcp->uvhub)
  966. (*localsp)++;
  967. else
  968. (*remotesp)++;
  969. }
  970. if (!cnt)
  971. return 1;
  972. return 0;
  973. }
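/*
 * Hypothetical example of the mapping above (not from the original source):
 * with a partition base pnode of 4, a target CPU living on pnode 6 sets bit
 * 6 - 4 = 2 in the descriptor's distribution map, so the broadcast reaches
 * the uvhub two pnodes above the partition base.
 */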
  974. /*
  975. * globally purge translation cache of a virtual address or all TLB's
  976. * @cpumask: mask of all cpu's in which the address is to be removed
  977. * @mm: mm_struct containing virtual address range
  978. * @start: start virtual address to be removed from TLB
  979. * @end: end virtual address to be removed from TLB
  980. * @cpu: the current cpu
  981. *
  982. * This is the entry point for initiating any UV global TLB shootdown.
  983. *
  984. * Purges the translation caches of all specified processors of the given
  985. * virtual address, or purges all TLB's on specified processors.
  986. *
  987. * The caller has derived the cpumask from the mm_struct. This function
  988. * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
  989. *
  990. * The cpumask is converted into a uvhubmask of the uvhubs containing
  991. * those cpus.
  992. *
  993. * Note that this function should be called with preemption disabled.
  994. *
  995. * Returns NULL if all remote flushing was done.
  996. * Returns pointer to cpumask if some remote flushing remains to be
  997. * done. The returned pointer is valid till preemption is re-enabled.
  998. */
  999. const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
  1000. struct mm_struct *mm,
  1001. unsigned long start,
  1002. unsigned long end,
  1003. unsigned int cpu)
  1004. {
  1005. int locals = 0, remotes = 0, hubs = 0;
  1006. struct bau_desc *bau_desc;
  1007. struct cpumask *flush_mask;
  1008. struct ptc_stats *stat;
  1009. struct bau_control *bcp;
  1010. unsigned long descriptor_status, status, address;
  1011. bcp = &per_cpu(bau_control, cpu);
  1012. if (bcp->nobau)
  1013. return cpumask;
  1014. stat = bcp->statp;
  1015. stat->s_enters++;
  1016. if (bcp->busy) {
  1017. descriptor_status =
  1018. read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
  1019. status = ((descriptor_status >> (bcp->uvhub_cpu *
  1020. UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
  1021. if (status == UV2H_DESC_BUSY)
  1022. return cpumask;
  1023. bcp->busy = 0;
  1024. }
  1025. /* bau was disabled due to slow response */
  1026. if (bcp->baudisabled) {
  1027. if (check_enable(bcp, stat)) {
  1028. stat->s_ipifordisabled++;
  1029. return cpumask;
  1030. }
  1031. }
  1032. /*
  1033. * Each sending cpu has a per-cpu mask which it fills from the caller's
  1034. * cpu mask. All cpus are converted to uvhubs and copied to the
  1035. * activation descriptor.
  1036. */
  1037. flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
  1038. /* don't actually do a shootdown of the local cpu */
  1039. cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
  1040. if (cpumask_test_cpu(cpu, cpumask))
  1041. stat->s_ntargself++;
  1042. bau_desc = bcp->descriptor_base;
  1043. bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
  1044. bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
  1045. if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
  1046. return NULL;
  1047. record_send_statistics(stat, locals, hubs, remotes, bau_desc);
  1048. if (!end || (end - start) <= PAGE_SIZE)
  1049. address = start;
  1050. else
  1051. address = TLB_FLUSH_ALL;
  1052. switch (bcp->uvhub_version) {
  1053. case UV_BAU_V1:
  1054. case UV_BAU_V2:
  1055. case UV_BAU_V3:
  1056. bau_desc->payload.uv1_2_3.address = address;
  1057. bau_desc->payload.uv1_2_3.sending_cpu = cpu;
  1058. break;
  1059. case UV_BAU_V4:
  1060. bau_desc->payload.uv4.address = address;
  1061. bau_desc->payload.uv4.sending_cpu = cpu;
  1062. bau_desc->payload.uv4.qualifier = BAU_DESC_QUALIFIER;
  1063. break;
  1064. }
  1065. /*
  1066. * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
  1067. * or 1 if it gave up and the original cpumask should be returned.
  1068. */
  1069. if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
  1070. return NULL;
  1071. else
  1072. return cpumask;
  1073. }
  1074. /*
  1075. * Search the message queue for any 'other' unprocessed message with the
  1076. * same software acknowledge resource bit vector as the 'msg' message.
  1077. */
  1078. struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
  1079. struct bau_control *bcp)
  1080. {
  1081. struct bau_pq_entry *msg_next = msg + 1;
  1082. unsigned char swack_vec = msg->swack_vec;
  1083. if (msg_next > bcp->queue_last)
  1084. msg_next = bcp->queue_first;
  1085. while (msg_next != msg) {
  1086. if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
  1087. (msg_next->swack_vec == swack_vec))
  1088. return msg_next;
  1089. msg_next++;
  1090. if (msg_next > bcp->queue_last)
  1091. msg_next = bcp->queue_first;
  1092. }
  1093. return NULL;
  1094. }
  1095. /*
  1096. * UV2 needs to work around a bug in which an arriving message has not
  1097. * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
  1098. * Such a message must be ignored.
  1099. */
  1100. void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
  1101. {
  1102. unsigned long mmr_image;
  1103. unsigned char swack_vec;
  1104. struct bau_pq_entry *msg = mdp->msg;
  1105. struct bau_pq_entry *other_msg;
  1106. mmr_image = ops.read_l_sw_ack();
  1107. swack_vec = msg->swack_vec;
  1108. if ((swack_vec & mmr_image) == 0) {
  1109. /*
  1110. * This message was assigned a swack resource, but no
  1111. * reserved acknowledgment is pending.
  1112. * The bug has prevented this message from setting the MMR.
  1113. */
  1114. /*
  1115. * Some message has set the MMR 'pending' bit; it might have
  1116. * been another message. Look for that message.
  1117. */
  1118. other_msg = find_another_by_swack(msg, bcp);
  1119. if (other_msg) {
  1120. /*
  1121. * There is another. Process this one but do not
  1122. * ack it.
  1123. */
  1124. bau_process_message(mdp, bcp, 0);
  1125. /*
  1126. * Let the natural processing of that other message
  1127. * acknowledge it. Don't get the processing of sw_ack's
  1128. * out of order.
  1129. */
  1130. return;
  1131. }
  1132. }
  1133. /*
  1134. * Either the MMR shows this one pending a reply or there is no
  1135. * other message using this sw_ack, so it is safe to acknowledge it.
  1136. */
  1137. bau_process_message(mdp, bcp, 1);
  1138. return;
  1139. }
  1140. /*
  1141. * The BAU message interrupt comes here. (registered by set_intr_gate)
  1142. * See entry_64.S
  1143. *
  1144. * We received a broadcast assist message.
  1145. *
  1146. * Interrupts are disabled; this interrupt could represent
  1147. * the receipt of several messages.
  1148. *
  1149. * All cores/threads on this hub get this interrupt.
  1150. * The last one to see it does the software ack.
  1151. * (the resource will not be freed until noninterruptible cpus see this
  1152. * interrupt; hardware may timeout the s/w ack and reply ERROR)
  1153. */
  1154. void uv_bau_message_interrupt(struct pt_regs *regs)
  1155. {
  1156. int count = 0;
  1157. cycles_t time_start;
  1158. struct bau_pq_entry *msg;
  1159. struct bau_control *bcp;
  1160. struct ptc_stats *stat;
  1161. struct msg_desc msgdesc;
  1162. ack_APIC_irq();
  1163. time_start = get_cycles();
  1164. bcp = &per_cpu(bau_control, smp_processor_id());
  1165. stat = bcp->statp;
  1166. msgdesc.queue_first = bcp->queue_first;
  1167. msgdesc.queue_last = bcp->queue_last;
  1168. msg = bcp->bau_msg_head;
  1169. while (msg->swack_vec) {
  1170. count++;
  1171. msgdesc.msg_slot = msg - msgdesc.queue_first;
  1172. msgdesc.msg = msg;
  1173. if (bcp->uvhub_version == UV_BAU_V2)
  1174. process_uv2_message(&msgdesc, bcp);
  1175. else
  1176. /* no error workaround for uv1 or uv3 */
  1177. bau_process_message(&msgdesc, bcp, 1);
  1178. msg++;
  1179. if (msg > msgdesc.queue_last)
  1180. msg = msgdesc.queue_first;
  1181. bcp->bau_msg_head = msg;
  1182. }
  1183. stat->d_time += (get_cycles() - time_start);
  1184. if (!count)
  1185. stat->d_nomsg++;
  1186. else if (count > 1)
  1187. stat->d_multmsg++;
  1188. }
  1189. /*
  1190. * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
  1191. * shootdown message timeouts enabled. The timeout does not cause
  1192. * an interrupt, but causes an error message to be returned to
  1193. * the sender.
  1194. */
  1195. static void __init enable_timeouts(void)
  1196. {
  1197. int uvhub;
  1198. int nuvhubs;
  1199. int pnode;
  1200. unsigned long mmr_image;
  1201. nuvhubs = uv_num_possible_blades();
  1202. for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
  1203. if (!uv_blade_nr_possible_cpus(uvhub))
  1204. continue;
  1205. pnode = uv_blade_to_pnode(uvhub);
  1206. mmr_image = read_mmr_misc_control(pnode);
  1207. /*
  1208. * Set the timeout period and then lock it in. The three
  1209. * writes below capture and lock in the period.
  1210. *
  1211. * To program the period, the SOFT_ACK_MODE must be off.
  1212. */
  1213. mmr_image &= ~(1L << SOFTACK_MSHIFT);
  1214. write_mmr_misc_control(pnode, mmr_image);
  1215. /*
  1216. * Set the 4-bit period.
  1217. */
  1218. mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
  1219. mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
  1220. write_mmr_misc_control(pnode, mmr_image);
  1221. /*
  1222. * UV1:
  1223. * Subsequent reversals of the timebase bit (3) cause an
  1224. * immediate timeout of one or all INTD resources as
  1225. * indicated in bits 2:0 (7 causes all of them to timeout).
  1226. */
  1227. mmr_image |= (1L << SOFTACK_MSHIFT);
  1228. if (is_uv2_hub()) {
  1229. /* do not touch the legacy mode bit */
  1230. /* hw bug workaround; do not use extended status */
  1231. mmr_image &= ~(1L << UV2_EXT_SHFT);
  1232. } else if (is_uv3_hub()) {
  1233. mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
  1234. mmr_image |= (1L << SB_STATUS_SHFT);
  1235. }
  1236. write_mmr_misc_control(pnode, mmr_image);
  1237. }
  1238. }
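/*
 * Illustrative restatement of the sequence above, not part of the original
 * driver: the period can only be programmed while SOFT_ACK_MODE is off, so
 * each hub's misc-control MMR is written three times: (1) clear the
 * SOFTACK mode bit, (2) program the 4-bit SOFTACK_TIMEOUT_PERIOD, and
 * (3) set the mode bit again (plus the UV2/UV3 quirk bits), which locks
 * the period in.
 */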
  1239. static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
  1240. {
  1241. if (*offset < num_possible_cpus())
  1242. return offset;
  1243. return NULL;
  1244. }
  1245. static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
  1246. {
  1247. (*offset)++;
  1248. if (*offset < num_possible_cpus())
  1249. return offset;
  1250. return NULL;
  1251. }
  1252. static void ptc_seq_stop(struct seq_file *file, void *data)
  1253. {
  1254. }
  1255. /*
  1256. * Display the statistics thru /proc/sgi_uv/ptc_statistics
  1257. * 'data' points to the cpu number
  1258. * Note: see the descriptions in stat_description[].
  1259. */
  1260. static int ptc_seq_show(struct seq_file *file, void *data)
  1261. {
  1262. struct ptc_stats *stat;
  1263. struct bau_control *bcp;
  1264. int cpu;
  1265. cpu = *(loff_t *)data;
  1266. if (!cpu) {
  1267. seq_puts(file,
  1268. "# cpu bauoff sent stime self locals remotes ncpus localhub ");
  1269. seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
  1270. seq_puts(file,
  1271. "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
  1272. seq_puts(file,
  1273. "rok resetp resett giveup sto bz throt disable ");
  1274. seq_puts(file,
  1275. "enable wars warshw warwaits enters ipidis plugged ");
  1276. seq_puts(file,
  1277. "ipiover glim cong swack recv rtime all one mult ");
  1278. seq_puts(file, "none retry canc nocan reset rcan\n");
  1279. }
  1280. if (cpu < num_possible_cpus() && cpu_online(cpu)) {
  1281. bcp = &per_cpu(bau_control, cpu);
  1282. if (bcp->nobau) {
  1283. seq_printf(file, "cpu %d bau disabled\n", cpu);
  1284. return 0;
  1285. }
  1286. stat = bcp->statp;
  1287. /* source side statistics */
  1288. seq_printf(file,
  1289. "cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
  1290. cpu, bcp->nobau, stat->s_requestor,
  1291. cycles_2_us(stat->s_time),
  1292. stat->s_ntargself, stat->s_ntarglocals,
  1293. stat->s_ntargremotes, stat->s_ntargcpu,
  1294. stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
  1295. stat->s_ntarguvhub, stat->s_ntarguvhub16);
  1296. seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
  1297. stat->s_ntarguvhub8, stat->s_ntarguvhub4,
  1298. stat->s_ntarguvhub2, stat->s_ntarguvhub1,
  1299. stat->s_dtimeout, stat->s_strongnacks);
  1300. seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
  1301. stat->s_retry_messages, stat->s_retriesok,
  1302. stat->s_resets_plug, stat->s_resets_timeout,
  1303. stat->s_giveup, stat->s_stimeout,
  1304. stat->s_busy, stat->s_throttles);
  1305. seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
  1306. stat->s_bau_disabled, stat->s_bau_reenabled,
  1307. stat->s_uv2_wars, stat->s_uv2_wars_hw,
  1308. stat->s_uv2_war_waits, stat->s_enters,
  1309. stat->s_ipifordisabled, stat->s_plugged,
  1310. stat->s_overipilimit, stat->s_giveuplimit,
  1311. stat->s_congested);
  1312. /* destination side statistics */
  1313. seq_printf(file,
  1314. "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
  1315. ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
  1316. stat->d_requestee, cycles_2_us(stat->d_time),
  1317. stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
  1318. stat->d_nomsg, stat->d_retries, stat->d_canceled,
  1319. stat->d_nocanceled, stat->d_resets,
  1320. stat->d_rcanceled);
  1321. }
  1322. return 0;
  1323. }
  1324. /*
  1325. * Display the tunables thru debugfs
  1326. */
  1327. static ssize_t tunables_read(struct file *file, char __user *userbuf,
  1328. size_t count, loff_t *ppos)
  1329. {
  1330. char *buf;
  1331. int ret;
  1332. buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
  1333. "max_concur plugged_delay plugsb4reset timeoutsb4reset",
  1334. "ipi_reset_limit complete_threshold congested_response_us",
  1335. "congested_reps disabled_period giveup_limit",
  1336. max_concurr, plugged_delay, plugsb4reset,
  1337. timeoutsb4reset, ipi_reset_limit, complete_threshold,
  1338. congested_respns_us, congested_reps, disabled_period,
  1339. giveup_limit);
  1340. if (!buf)
  1341. return -ENOMEM;
  1342. ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
  1343. kfree(buf);
  1344. return ret;
  1345. }

/*
 * handle a write to /proc/sgi_uv/ptc_statistics
 * -1: reset the statistics
 *  0: display meaning of the statistics
 *  "on" / "off": enable or disable the BAU
 */
static ssize_t ptc_proc_write(struct file *file, const char __user *user,
			      size_t count, loff_t *data)
{
	int cpu;
	int i;
	int elements;
	long input_arg;
	char optstr[64];
	struct ptc_stats *stat;

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';

	if (!strcmp(optstr, "on")) {
		set_bau_on();
		return count;
	} else if (!strcmp(optstr, "off")) {
		set_bau_off();
		return count;
	}

	if (kstrtol(optstr, 10, &input_arg) < 0) {
		pr_debug("%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (input_arg == 0) {
		elements = ARRAY_SIZE(stat_description);
		pr_debug("# cpu: cpu number\n");
		pr_debug("Sender statistics:\n");
		for (i = 0; i < elements; i++)
			pr_debug("%s\n", stat_description[i]);
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
		}
	}

	return count;
}
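
/*
 * Illustrative usage of the statistics control file named above:
 *
 *   echo -1  > /proc/sgi_uv/ptc_statistics   # zero the per-cpu counters
 *   echo 0   > /proc/sgi_uv/ptc_statistics   # log the field descriptions
 *   echo on  > /proc/sgi_uv/ptc_statistics   # enable the BAU
 *   echo off > /proc/sgi_uv/ptc_statistics   # disable the BAU
 *
 * The field descriptions are emitted with pr_debug(), so they are only
 * visible when debug logging is enabled for this file.
 */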

static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
		}
	}
}
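
/*
 * local_atoi() accumulates leading decimal digits and returns at the first
 * non-digit, e.g. local_atoi("20 30") == 20 and local_atoi("x") == 0.
 * parse_tunables_write() below relies on this to pull one value out of each
 * whitespace-delimited token.
 */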

/*
 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
 * Zero values reset them to defaults.
 */
static int parse_tunables_write(struct bau_control *bcp, char *instr,
				int count)
{
	char *p;
	char *q;
	int cnt = 0;
	int val;
	int e = ARRAY_SIZE(tunables);

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
	if (cnt != e) {
		pr_info("bau tunable error: should be %d values\n", e);
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			if (val == 0) {
				max_concurr = MAX_BAU_CONCURRENT;
				max_concurr_const = MAX_BAU_CONCURRENT;
				continue;
			}
			if (val < 1 || val > bcp->cpus_in_uvhub) {
				pr_debug(
				"Error: BAU max concurrent %d is invalid\n",
				val);
				return -EINVAL;
			}
			max_concurr = val;
			max_concurr_const = val;
			continue;
		default:
			if (val == 0)
				*tunables[cnt].tunp = tunables[cnt].deflt;
			else
				*tunables[cnt].tunp = val;
			continue;
		}
		if (q == p)
			break;
	}
	return 0;
}
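
/*
 * Illustrative tunables write (the values are placeholders, not defaults):
 *
 *   echo "32 20 3 3 10 5 1000 10 5 3" > /sys/kernel/debug/sgi_uv/bau_tunables
 *
 * Exactly ARRAY_SIZE(tunables) whitespace-separated values are required, in
 * the order listed by tunables_read(); a 0 in any position restores that
 * tunable's default, and the first value (max_concur) must not exceed the
 * writing cpu's cpus_in_uvhub.
 */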

/*
 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
 */
static ssize_t tunables_write(struct file *file, const char __user *user,
			      size_t count, loff_t *data)
{
	int cpu;
	int ret;
	char instr[100];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';

	cpu = get_cpu();
	bcp = &per_cpu(bau_control, cpu);
	ret = parse_tunables_write(bcp, instr, count);
	put_cpu();
	if (ret)
		return ret;

	/* propagate the newly parsed tunables to every present cpu */
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_concurr = max_concurr;
		bcp->max_concurr_const = max_concurr;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->cong_response_us = congested_respns_us;
		bcp->cong_reps = congested_reps;
		bcp->disabled_period = sec_2_cycles(disabled_period);
		bcp->giveup_limit = giveup_limit;
	}
	return count;
}

static const struct seq_operations uv_ptc_seq_ops = {
	.start = ptc_seq_start,
	.next = ptc_seq_next,
	.stop = ptc_seq_stop,
	.show = ptc_seq_show
};

static int ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations proc_uv_ptc_operations = {
	.open = ptc_proc_open,
	.read = seq_read,
	.write = ptc_proc_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations tunables_fops = {
	.open = tunables_open,
	.read = tunables_read,
	.write = tunables_write,
	.llseek = default_llseek,
};

static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &proc_uv_ptc_operations);
	if (!proc_uv_ptc) {
		pr_err("unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	if (!tunables_dir) {
		pr_err("unable to create debugfs directory %s\n",
		       UV_BAU_TUNABLES_DIR);
		return -EINVAL;
	}

	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
					    tunables_dir, NULL, &tunables_fops);
	if (!tunables_file) {
		pr_err("unable to create debugfs file %s\n",
		       UV_BAU_TUNABLES_FILE);
		return -EINVAL;
	}
	return 0;
}

/*
 * Initialize the sending side's sending buffers.
 */
static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
	int i;
	int cpu;
	int uv1 = 0;
	unsigned long gpa;
	unsigned long m;
	unsigned long n;
	size_t dsize;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct uv1_bau_msg_header *uv1_hdr;
	struct uv2_3_bau_msg_header *uv2_3_hdr;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
	 * per cpu; and one per cpu on the uvhub (ADP_SZ)
	 */
	dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
	bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	gpa = uv_gpa(bau_desc);
	n = uv_gpa_to_gnode(gpa);
	m = ops.bau_gpa_to_offset(gpa);
	if (is_uv1_hub())
		uv1 = 1;

	/* the 14-bit pnode */
	write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
	/*
	 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		if (uv1) {
			uv1_hdr = &bd2->header.uv1_hdr;
			uv1_hdr->swack_flag = 1;
			/*
			 * The base_dest_nasid set in the message header
			 * is the nasid of the first uvhub in the partition.
			 * The bit map will indicate destination pnode numbers
			 * relative to that base. They may not be consecutive
			 * if nasid striding is being used.
			 */
			uv1_hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
			uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
			uv1_hdr->command = UV_NET_ENDPOINT_INTD;
			uv1_hdr->int_both = 1;
			/*
			 * all others need to be set to zero:
			 *   fairness chaining multilevel count replied_to
			 */
		} else {
			/*
			 * BIOS uses legacy mode, but uv2 and uv3 hardware always
			 * uses native mode for selective broadcasts.
			 */
			uv2_3_hdr = &bd2->header.uv2_3_hdr;
			uv2_3_hdr->swack_flag = 1;
			uv2_3_hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
			uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
			uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
		}
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}
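
/*
 * The descriptor base MMR written above packs the descriptor page's gnode
 * (shifted by UV_DESC_PSHIFT) together with its node-local offset; this is
 * the value the hub uses to locate the activation descriptors in memory.
 */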

/*
 * Initialize the destination side's receiving buffers; entered once for
 * each uvhub in the partition.
 * - node is the first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void pq_init(int node, int pnode)
{
	int cpu;
	size_t plsize;
	char *cp;
	void *vp;
	unsigned long gnode, first, last, tail;
	struct bau_pq_entry *pqp;
	struct bau_control *bcp;

	plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
	vp = kmalloc_node(plsize, GFP_KERNEL, node);
	pqp = (struct bau_pq_entry *)vp;
	BUG_ON(!pqp);

	cp = (char *)pqp + 31;
	pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
	}

	first = ops.bau_gpa_to_offset(uv_gpa(pqp));
	last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1)));

	/*
	 * Pre UV4, the gnode is required to locate the payload queue
	 * and the payload queue tail must be maintained by the kernel.
	 */
	bcp = &per_cpu(bau_control, smp_processor_id());
	if (bcp->uvhub_version <= UV_BAU_V3) {
		tail = first;
		gnode = uv_gpa_to_gnode(uv_gpa(pqp));
		first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
		write_mmr_payload_tail(pnode, tail);
	}

	ops.write_payload_first(pnode, first);
	ops.write_payload_last(pnode, last);

	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
}
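
/*
 * Note on the pointer arithmetic in pq_init(): one extra bau_pq_entry is
 * allocated so the queue start can be rounded up to a 32-byte boundary,
 * which is what the "+ 31" followed by the ">> 5 << 5" pair accomplishes
 * (presumably because the hub expects the payload queue at that alignment).
 */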

/*
 * Initialization of each UV hub's structures
 */
static void __init init_uvhub(int uvhub, int vector, int base_pnode)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);

	activation_descriptor_init(node, pnode, base_pnode);

	pq_init(node, pnode);

	/*
	 * The below initialization can't be done in firmware because the
	 * messaging IRQ will be determined by the OS.
	 */
	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
	write_mmr_data_config(pnode, ((apicid << 32) | vector));
}

/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

	if (is_uv1_hub()) {
		mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
		mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
		mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
		ts_ns = timeout_base_ns[index];
		ts_ns *= (mult1 * mult2);
		ret = ts_ns / 1000;
	} else {
		/* same destination timeout for uv2 and uv3 */
		/* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
		mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
		if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
			base = 80;
		else
			base = 10;
		mult1 = mmr_image & UV2_ACK_MASK;
		ret = mult1 * base;
	}
	return ret;
}
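
/*
 * Worked example for the UV2/UV3 branch above (values illustrative): if the
 * softack field selects the 80us base unit and the multiplier bits hold 3,
 * the destination timeout returned is 3 * 80 = 240 microseconds.  The UV1
 * branch computes nanoseconds from the BIOS-set prescale/timeout registers
 * and divides by 1000, so it too returns microseconds.
 */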

static void __init init_per_cpu_tunables(void)
{
	int cpu;
	struct bau_control *bcp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled = 0;
		if (nobau)
			bcp->nobau = true;
		bcp->statp = &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval = usec_2_cycles(2*timeout_us);
		bcp->max_concurr = max_concurr;
		bcp->max_concurr_const = max_concurr;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->cong_response_us = congested_respns_us;
		bcp->cong_reps = congested_reps;
		bcp->disabled_period = sec_2_cycles(disabled_period);
		bcp->giveup_limit = giveup_limit;
		spin_lock_init(&bcp->queue_lock);
		spin_lock_init(&bcp->uvhub_lock);
		spin_lock_init(&bcp->disable_lock);
	}
}

/*
 * Scan all cpus to collect blade and socket summaries.
 */
static int __init get_cpu_topology(int base_pnode,
				   struct uvhub_desc *uvhub_descs,
				   unsigned char *uvhub_mask)
{
	int cpu;
	int pnode;
	int uvhub;
	int socket;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		memset(bcp, 0, sizeof(struct bau_control));

		pnode = uv_cpu_hub_info(cpu)->pnode;
		if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
			pr_emerg("cpu %d pnode %d-%d beyond %d; BAU disabled\n",
				 cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
			return 1;
		}

		bcp->osnode = cpu_to_node(cpu);
		bcp->partition_base_pnode = base_pnode;

		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];

		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;

		/*
		 * kludge: 'assuming' one node per socket, and assuming that
		 * disabling a socket just leaves a gap in node numbers
		 */
		socket = bcp->osnode & 1;
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
			pr_emerg("%d cpus per socket invalid\n",
				 sdp->num_cpus);
			return 1;
		}
	}
	return 0;
}

/*
 * Each socket is to get a local array of pnodes/hubs.
 */
static void make_per_cpu_thp(struct bau_control *smaster)
{
	int cpu;
	size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();

	smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
	memset(smaster->thp, 0, hpsz);

	for_each_present_cpu(cpu) {
		smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
		smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
	}
}

/*
 * Each uvhub is to get a local cpumask.
 */
static void make_per_hub_cpumask(struct bau_control *hmaster)
{
	int sz = sizeof(cpumask_t);

	hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
}

/*
 * Initialize all the per-cpu information for the cpus on a given socket,
 * given what has been gathered into the socket_desc struct, and report
 * the chosen hub and socket masters back to the caller.
 */
static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
		     struct bau_control **smasterp,
		     struct bau_control **hmasterp)
{
	int i, cpu, uvhub_cpu;
	struct bau_control *bcp;

	for (i = 0; i < sdp->num_cpus; i++) {
		cpu = sdp->cpu_number[i];
		bcp = &per_cpu(bau_control, cpu);
		bcp->cpu = cpu;
		if (i == 0) {
			*smasterp = bcp;
			if (!(*hmasterp))
				*hmasterp = bcp;
		}
		bcp->cpus_in_uvhub = bdp->num_cpus;
		bcp->cpus_in_socket = sdp->num_cpus;
		bcp->socket_master = *smasterp;
		bcp->uvhub = bdp->uvhub;
		if (is_uv1_hub())
			bcp->uvhub_version = UV_BAU_V1;
		else if (is_uv2_hub())
			bcp->uvhub_version = UV_BAU_V2;
		else if (is_uv3_hub())
			bcp->uvhub_version = UV_BAU_V3;
		else if (is_uv4_hub())
			bcp->uvhub_version = UV_BAU_V4;
		else {
			pr_emerg("uvhub version not 1, 2, 3, or 4\n");
			return 1;
		}
		bcp->uvhub_master = *hmasterp;
		uvhub_cpu = uv_cpu_blade_processor_id(cpu);
		bcp->uvhub_cpu = uvhub_cpu;

		/*
		 * The ERROR and BUSY status registers are located pairwise over
		 * the STATUS_0 and STATUS_1 mmrs; each an array[32] of 2 bits.
		 */
		if (uvhub_cpu < UV_CPUS_PER_AS) {
			bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
			bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE;
		} else {
			bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
			bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS)
						* UV_ACT_STATUS_SIZE;
		}

		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
			pr_emerg("%d cpus per uvhub invalid\n",
				 bcp->uvhub_cpu);
			return 1;
		}
	}
	return 0;
}
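
/*
 * Example of the status-register split (assuming UV_CPUS_PER_AS is 32 and
 * UV_ACT_STATUS_SIZE is 2, matching the "array[32] of 2 bits" comment above):
 * uvhub_cpu 5 reads bits 10-11 of ACTIVATION_STATUS_0, while uvhub_cpu 33
 * reads bits 2-3 of ACTIVATION_STATUS_1.
 */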

/*
 * Summarize the blade and socket topology into the per_cpu structures.
 */
static int __init summarize_uvhub_sockets(int nuvhubs,
					  struct uvhub_desc *uvhub_descs,
					  unsigned char *uvhub_mask)
{
	int socket;
	int uvhub;
	unsigned short socket_mask;

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		struct uvhub_desc *bdp;
		struct bau_control *smaster = NULL;
		struct bau_control *hmaster = NULL;

		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
			continue;

		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
		while (socket_mask) {
			struct socket_desc *sdp;
			if ((socket_mask & 1)) {
				sdp = &bdp->socket[socket];
				if (scan_sock(sdp, bdp, &smaster, &hmaster))
					return 1;
				make_per_cpu_thp(smaster);
			}
			socket++;
			socket_mask = (socket_mask >> 1);
		}
		make_per_hub_cpumask(hmaster);
	}
	return 0;
}

/*
 * initialize the bau_control structure for each cpu
 */
static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
{
	unsigned char *uvhub_mask;
	void *vp;
	struct uvhub_desc *uvhub_descs;

	if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
		timeout_us = calculate_destination_timeout();

	vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	uvhub_descs = (struct uvhub_desc *)vp;
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);

	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
		goto fail;

	if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
		goto fail;

	kfree(uvhub_descs);
	kfree(uvhub_mask);
	init_per_cpu_tunables();
	return 0;

fail:
	kfree(uvhub_descs);
	kfree(uvhub_mask);
	return 1;
}

static const struct bau_operations uv1_bau_ops __initconst = {
	.bau_gpa_to_offset = uv_gpa_to_offset,
	.read_l_sw_ack = read_mmr_sw_ack,
	.read_g_sw_ack = read_gmmr_sw_ack,
	.write_l_sw_ack = write_mmr_sw_ack,
	.write_g_sw_ack = write_gmmr_sw_ack,
	.write_payload_first = write_mmr_payload_first,
	.write_payload_last = write_mmr_payload_last,
	.wait_completion = uv1_wait_completion,
};

static const struct bau_operations uv2_3_bau_ops __initconst = {
	.bau_gpa_to_offset = uv_gpa_to_offset,
	.read_l_sw_ack = read_mmr_sw_ack,
	.read_g_sw_ack = read_gmmr_sw_ack,
	.write_l_sw_ack = write_mmr_sw_ack,
	.write_g_sw_ack = write_gmmr_sw_ack,
	.write_payload_first = write_mmr_payload_first,
	.write_payload_last = write_mmr_payload_last,
	.wait_completion = uv2_3_wait_completion,
};

static const struct bau_operations uv4_bau_ops __initconst = {
	.bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
	.read_l_sw_ack = read_mmr_proc_sw_ack,
	.read_g_sw_ack = read_gmmr_proc_sw_ack,
	.write_l_sw_ack = write_mmr_proc_sw_ack,
	.write_g_sw_ack = write_gmmr_proc_sw_ack,
	.write_payload_first = write_mmr_proc_payload_first,
	.write_payload_last = write_mmr_proc_payload_last,
	.wait_completion = uv4_wait_completion,
};

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int cpus;
	int vector;
	cpumask_var_t *mask;

	if (!is_uv_system())
		return 0;

	if (is_uv4_hub())
		ops = uv4_bau_ops;
	else if (is_uv3_hub())
		ops = uv2_3_bau_ops;
	else if (is_uv2_hub())
		ops = uv2_3_bau_ops;
	else if (is_uv1_hub())
		ops = uv1_bau_ops;

	for_each_possible_cpu(cur_cpu) {
		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
		zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
	}

	nuvhubs = uv_num_possible_blades();
	congested_cycles = usec_2_cycles(congested_respns_us);

	uv_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		cpus = uv_blade_nr_possible_cpus(uvhub);
		if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
			uv_base_pnode = uv_blade_to_pnode(uvhub);
	}

	/* software timeouts are not supported on UV4 */
	if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
		enable_timeouts();

	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
		set_bau_off();
		nobau_perm = 1;
		return 0;
	}

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub))
			init_uvhub(uvhub, vector, uv_base_pnode);
	}

	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			unsigned long val;
			unsigned long mmr;
			pnode = uv_blade_to_pnode(uvhub);
			/* INIT the bau */
			val = 1L << 63;
			write_gmmr_activation(pnode, val);
			mmr = 1; /* should be 1 to broadcast to both sockets */
			if (!is_uv1_hub())
				write_mmr_data_broadcast(pnode, mmr);
		}
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);
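
/*
 * Initcall ordering: core_initcall() runs at an earlier init level than
 * fs_initcall(), so the per-cpu BAU structures set up by uv_bau_init()
 * exist before uv_ptc_init() exposes them through /proc and debugfs.
 */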