x2apic_uv_x.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/kdebug.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/reboot.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>
#include <asm/x86_init.h>
#include <asm/nmi.h>

DEFINE_PER_CPU(int, x2apic_extra_bits);

#define PR_DEVEL(fmt, args...) pr_devel("%s: " fmt, __func__, args)

static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
static u64 gru_dist_base, gru_first_node_paddr = -1LL, gru_last_node_paddr;
static u64 gru_dist_lmask, gru_dist_umask;
static union uvh_apicid uvh_apicid;
int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
unsigned int uv_apicid_hibits;
EXPORT_SYMBOL_GPL(uv_apicid_hibits);

static struct apic apic_x2apic_uv_x;
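
/* Read a hub MMR early in boot, through a temporary early_ioremap() mapping. */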
static unsigned long __init uv_early_read_mmr(unsigned long addr)
{
        unsigned long val, *mmr;

        mmr = early_ioremap(UV_LOCAL_MMR_BASE | addr, sizeof(*mmr));
        val = *mmr;
        early_iounmap(mmr, sizeof(*mmr));
        return val;
}
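
/*
 * Return true if [start, end) lies entirely within the GRU address space,
 * for either the contiguous or the distributed (per-node) GRU layout.
 */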
static inline bool is_GRU_range(u64 start, u64 end)
{
        if (gru_dist_base) {
                u64 su = start & gru_dist_umask; /* upper (incl pnode) bits */
                u64 sl = start & gru_dist_lmask; /* base offset bits */
                u64 eu = end & gru_dist_umask;
                u64 el = end & gru_dist_lmask;

                /* Must reside completely within a single GRU range */
                return (sl == gru_dist_base && el == gru_dist_base &&
                        su >= gru_first_node_paddr &&
                        su <= gru_last_node_paddr &&
                        eu == su);
        } else {
                return start >= gru_start_paddr && end <= gru_end_paddr;
        }
}

static bool uv_is_untracked_pat_range(u64 start, u64 end)
{
        return is_ISA_range(start, end) || is_GRU_range(start, end);
}
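
/* Determine the boot node's pnode and the minimum hub revision via early MMR reads. */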
static int __init early_get_pnodeid(void)
{
        union uvh_node_id_u node_id;
        union uvh_rh_gam_config_mmr_u m_n_config;
        int pnode;

        /* Currently, all blades have same revision number */
        node_id.v = uv_early_read_mmr(UVH_NODE_ID);
        m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
        uv_min_hub_revision_id = node_id.s.revision;

        switch (node_id.s.part_number) {
        case UV2_HUB_PART_NUMBER:
        case UV2_HUB_PART_NUMBER_X:
                uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
                break;
        case UV3_HUB_PART_NUMBER:
        case UV3_HUB_PART_NUMBER_X:
                uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
                break;
        }

        uv_hub_info->hub_revision = uv_min_hub_revision_id;
        pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
        return pnode;
}
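
/* Read the APIC pnode shift from the hub; old BIOSes leave the MMR zero, so use the default. */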
static void __init early_get_apic_pnode_shift(void)
{
        uvh_apicid.v = uv_early_read_mmr(UVH_APICID);
        if (!uvh_apicid.v)
                /*
                 * Old bios, use default value
                 */
                uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
}

/*
 * Add an extra bit as dictated by bios to the destination apicid of
 * interrupts potentially passing through the UV HUB. This prevents
 * a deadlock between interrupts and IO port operations.
 */
static void __init uv_set_apicid_hibit(void)
{
        union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;

        if (is_uv1_hub()) {
                apicid_mask.v =
                        uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
                uv_apicid_hibits =
                        apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
        }
}
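
/*
 * Recognize a UV system from the ACPI MADT OEM ids and record which UV
 * APIC mode (legacy, x2apic, or non-unique) the BIOS has configured.
 */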
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        int pnodeid, is_uv1, is_uv2, is_uv3;

        is_uv1 = !strcmp(oem_id, "SGI");
        is_uv2 = !strcmp(oem_id, "SGI2");
        is_uv3 = !strncmp(oem_id, "SGI3", 4);   /* there are varieties of UV3 */
        if (is_uv1 || is_uv2 || is_uv3) {
                uv_hub_info->hub_revision =
                        (is_uv1 ? UV1_HUB_REVISION_BASE :
                        (is_uv2 ? UV2_HUB_REVISION_BASE :
                                  UV3_HUB_REVISION_BASE));
                pnodeid = early_get_pnodeid();
                early_get_apic_pnode_shift();
                x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
                x86_platform.nmi_init = uv_nmi_init;
                if (!strcmp(oem_table_id, "UVL"))
                        uv_system_type = UV_LEGACY_APIC;
                else if (!strcmp(oem_table_id, "UVX"))
                        uv_system_type = UV_X2APIC;
                else if (!strcmp(oem_table_id, "UVH")) {
                        __this_cpu_write(x2apic_extra_bits,
                                pnodeid << uvh_apicid.s.pnode_shift);
                        uv_system_type = UV_NON_UNIQUE_APIC;
                        uv_set_apicid_hibit();
                        return 1;
                }
        }
        return 0;
}

enum uv_system_type get_uv_system_type(void)
{
        return uv_system_type;
}

int is_uv_system(void)
{
        return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);
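
/*
 * Wake a secondary cpu by writing INIT and then STARTUP messages directly
 * to the target pnode's IPI_INT MMR.
 */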
static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
#ifdef CONFIG_SMP
        unsigned long val;
        int pnode;

        pnode = uv_apicid_to_pnode(phys_apicid);
        phys_apicid |= uv_apicid_hibits;
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_INIT;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
            (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
            ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
            APIC_DM_STARTUP;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

        atomic_set(&init_deasserted, 1);
#endif
        return 0;
}
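
/* Send one IPI: translate the cpu to its apicid and pnode, then use the hub IPI mechanism. */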
static void uv_send_IPI_one(int cpu, int vector)
{
        unsigned long apicid;
        int pnode;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        pnode = uv_apicid_to_pnode(apicid);
        uv_hub_send_ipi(pnode, apicid, vector);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
        }
}

static void uv_send_IPI_allbutself(int vector)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        for_each_online_cpu(cpu) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
        }
}

static void uv_send_IPI_all(int vector)
{
        uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_valid(int apicid)
{
        return 1;
}

static int uv_apic_id_registered(void)
{
        return 1;
}

static void uv_init_apic_ldr(void)
{
}

static int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                          const struct cpumask *andmask,
                          unsigned int *apicid)
{
        unsigned int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }

        if (likely(cpu < nr_cpu_ids)) {
                *apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
                return 0;
        }

        return -EINVAL;
}
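
/*
 * The effective apic id includes the per-cpu x2apic_extra_bits (pnode bits)
 * ORed into the value read from the local APIC.
 */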
static unsigned int x2apic_get_apic_id(unsigned long x)
{
        unsigned int id;

        WARN_ON(preemptible() && num_online_cpus() > 1);
        id = x | __this_cpu_read(x2apic_extra_bits);

        return id;
}

static unsigned long set_apic_id(unsigned int id)
{
        unsigned long x;

        /* maskout x2apic_extra_bits ? */
        x = id;
        return x;
}

static unsigned int uv_read_apic_id(void)
{
        return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
        return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
        apic_write(APIC_SELF_IPI, vector);
}

static int uv_probe(void)
{
        return apic == &apic_x2apic_uv_x;
}

static struct apic __refdata apic_x2apic_uv_x = {

        .name = "UV large system",
        .probe = uv_probe,
        .acpi_madt_oem_check = uv_acpi_madt_oem_check,
        .apic_id_valid = uv_apic_id_valid,
        .apic_id_registered = uv_apic_id_registered,

        .irq_delivery_mode = dest_Fixed,
        .irq_dest_mode = 0, /* physical */

        .target_cpus = online_target_cpus,
        .disable_esr = 0,
        .dest_logical = APIC_DEST_LOGICAL,
        .check_apicid_used = NULL,
        .check_apicid_present = NULL,

        .vector_allocation_domain = default_vector_allocation_domain,
        .init_apic_ldr = uv_init_apic_ldr,

        .ioapic_phys_id_map = NULL,
        .setup_apic_routing = NULL,
        .multi_timer_check = NULL,
        .cpu_present_to_apicid = default_cpu_present_to_apicid,
        .apicid_to_cpu_present = NULL,
        .setup_portio_remap = NULL,
        .check_phys_apicid_present = default_check_phys_apicid_present,
        .enable_apic_mode = NULL,
        .phys_pkg_id = uv_phys_pkg_id,
        .mps_oem_check = NULL,

        .get_apic_id = x2apic_get_apic_id,
        .set_apic_id = set_apic_id,
        .apic_id_mask = 0xFFFFFFFFu,

        .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,

        .send_IPI_mask = uv_send_IPI_mask,
        .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
        .send_IPI_allbutself = uv_send_IPI_allbutself,
        .send_IPI_all = uv_send_IPI_all,
        .send_IPI_self = uv_send_IPI_self,

        .wakeup_secondary_cpu = uv_wakeup_secondary,
        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert = false,
        .smp_callin_clear_local_apic = NULL,
        .inquire_remote_apic = NULL,

        .read = native_apic_msr_read,
        .write = native_apic_msr_write,
        .eoi_write = native_apic_msr_eoi_write,
        .icr_read = native_x2apic_icr_read,
        .icr_write = native_x2apic_icr_write,
        .wait_icr_idle = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};
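
/* Record the pnode bits that are ORed into this cpu's apic id (x2apic_extra_bits). */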
static void set_x2apic_extra_bits(int pnode)
{
        __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}

/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
        int blade;

        for (blade = 0; blade < uv_num_possible_blades(); blade++)
                if (pnode == uv_blade_info[blade].pnode)
                        return blade;
        BUG();
}

struct redir_addr {
        unsigned long redirect;
        unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
};
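
/* Return the shift (n_lshift) used for the node bits in a global address; it varies by hub type. */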
static unsigned char get_n_lshift(int m_val)
{
        union uv3h_gr0_gam_gr_config_u m_gr_config;

        if (is_uv1_hub())
                return m_val;

        if (is_uv2_hub())
                return m_val == 40 ? 40 : 39;

        m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
        return m_gr_config.s3.m_skt;
}
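
/*
 * Find the alias/redirect pair that covers physical address 0 and report the
 * redirect target base and the size of the aliased lowmem region.
 */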
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
        union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
        union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
        int i;

        for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
                alias.v = uv_read_local_mmr(redir_addrs[i].alias);
                if (alias.s.enable && alias.s.base == 0) {
                        *size = (1UL << alias.s.m_alias);
                        redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
                        *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
                        return;
                }
        }
        *base = *size = 0;
}
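
/*
 * Create a kernel mapping (write-back or uncached) for a high region:
 * base << pshift, covering (max_pnode + 1) blocks of 1 << bshift bytes.
 */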
enum map_type {map_wb, map_uc};

static __init void map_high(char *id, unsigned long base, int pshift,
                        int bshift, int max_pnode, enum map_type map_type)
{
        unsigned long bytes, paddr;

        paddr = base << pshift;
        bytes = (1UL << bshift) * (max_pnode + 1);
        if (!paddr) {
                pr_info("UV: Map %s_HI base address NULL\n", id);
                return;
        }
        pr_debug("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
        if (map_type == map_uc)
                init_extra_mapping_uc(paddr, bytes);
        else
                init_extra_mapping_wb(paddr, bytes);
}
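
/*
 * UV3 distributed GRU mode: each node has its own GRU block. Map one block
 * per online node and remember the first/last node addresses for is_GRU_range().
 */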
static __init void map_gru_distributed(unsigned long c)
{
        union uvh_rh_gam_gru_overlay_config_mmr_u gru;
        u64 paddr;
        unsigned long bytes;
        int nid;

        gru.v = c;
        /* only base bits 42:28 relevant in dist mode */
        gru_dist_base = gru.v & 0x000007fff0000000UL;
        if (!gru_dist_base) {
                pr_info("UV: Map GRU_DIST base address NULL\n");
                return;
        }
        bytes = 1UL << UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
        gru_dist_lmask = ((1UL << uv_hub_info->m_val) - 1) & ~(bytes - 1);
        gru_dist_umask = ~((1UL << uv_hub_info->m_val) - 1);
        gru_dist_base &= gru_dist_lmask; /* Clear bits above M */
        for_each_online_node(nid) {
                paddr = ((u64)uv_node_to_pnode(nid) << uv_hub_info->m_val) |
                                gru_dist_base;
                init_extra_mapping_wb(paddr, bytes);
                gru_first_node_paddr = min(paddr, gru_first_node_paddr);
                gru_last_node_paddr = max(paddr, gru_last_node_paddr);
        }
        /* Save upper (63:M) bits of address only for is_GRU_range */
        gru_first_node_paddr &= gru_dist_umask;
        gru_last_node_paddr &= gru_dist_umask;
        pr_debug("UV: Map GRU_DIST base 0x%016llx 0x%016llx - 0x%016llx\n",
                gru_dist_base, gru_first_node_paddr, gru_last_node_paddr);
}
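
/* Map the GRU space, if enabled, choosing the contiguous or distributed (UV3) layout. */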
static __init void map_gru_high(int max_pnode)
{
        union uvh_rh_gam_gru_overlay_config_mmr_u gru;
        int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

        gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
        if (!gru.s.enable) {
                pr_info("UV: GRU disabled\n");
                return;
        }

        if (is_uv3_hub() && gru.s3.mode) {
                map_gru_distributed(gru.v);
                return;
        }
        map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
        gru_start_paddr = ((u64)gru.s.base << shift);
        gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
}
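
/* Map the global MMR overlay space uncached, if enabled. */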
static __init void map_mmr_high(int max_pnode)
{
        union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
        int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

        mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
        if (mmr.s.enable)
                map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
        else
                pr_info("UV: MMR disabled\n");
}

/*
 * This commonality works because both 0 & 1 versions of the MMIOH OVERLAY
 * and REDIRECT MMR regs are exactly the same on UV3.
 */
struct mmioh_config {
        unsigned long overlay;
        unsigned long redirect;
        char *id;
};

static __initdata struct mmioh_config mmiohs[] = {
        {
                UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR,
                UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR,
                "MMIOH0"
        },
        {
                UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR,
                UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR,
                "MMIOH1"
        },
};
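
/*
 * Map one UV3 MMIOH region: walk its redirect table, coalesce runs of entries
 * that point at the same NASID, log each run, and map the region uncached.
 */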
static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
{
        union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay;
        unsigned long mmr;
        unsigned long base;
        int i, n, shift, m_io, max_io;
        int nasid, lnasid, fi, li;
        char *id;

        id = mmiohs[index].id;
        overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
        pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n",
                id, overlay.v, overlay.s3.base, overlay.s3.m_io);
        if (!overlay.s3.enable) {
                pr_info("UV: %s disabled\n", id);
                return;
        }

        shift = UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT;
        base = (unsigned long)overlay.s3.base;
        m_io = overlay.s3.m_io;
        mmr = mmiohs[index].redirect;
        n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
        min_pnode *= 2;                 /* convert to NASID */
        max_pnode *= 2;
        max_io = lnasid = fi = li = -1;

        for (i = 0; i < n; i++) {
                union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect;

                redirect.v = uv_read_local_mmr(mmr + i * 8);
                nasid = redirect.s3.nasid;
                if (nasid < min_pnode || max_pnode < nasid)
                        nasid = -1;             /* invalid NASID */

                if (nasid == lnasid) {
                        li = i;
                        if (i != n-1)           /* last entry check */
                                continue;
                }

                /* check if we have a cached (or last) redirect to print */
                if (lnasid != -1 || (i == n-1 && nasid != -1)) {
                        unsigned long addr1, addr2;
                        int f, l;

                        if (lnasid == -1) {
                                f = l = i;
                                lnasid = nasid;
                        } else {
                                f = fi;
                                l = li;
                        }
                        addr1 = (base << shift) +
                                f * (unsigned long)(1 << m_io);
                        addr2 = (base << shift) +
                                (l + 1) * (unsigned long)(1 << m_io);
                        pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
                                id, fi, li, lnasid, addr1, addr2);
                        if (max_io < l)
                                max_io = l;
                }
                fi = li = i;
                lnasid = nasid;
        }

        pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n",
                id, base, shift, m_io, max_io);

        if (max_io >= 0)
                map_high(id, base, shift, m_io, max_io, map_uc);
}
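
/* Map the MMIOH (memory-mapped I/O high) space according to the hub type. */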
static __init void map_mmioh_high(int min_pnode, int max_pnode)
{
        union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
        unsigned long mmr, base;
        int shift, enable, m_io, n_io;

        if (is_uv3_hub()) {
                /* Map both MMIOH regions */
                map_mmioh_high_uv3(0, min_pnode, max_pnode);
                map_mmioh_high_uv3(1, min_pnode, max_pnode);
                return;
        }

        if (is_uv1_hub()) {
                mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
                shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
                mmioh.v = uv_read_local_mmr(mmr);
                enable = !!mmioh.s1.enable;
                base = mmioh.s1.base;
                m_io = mmioh.s1.m_io;
                n_io = mmioh.s1.n_io;
        } else if (is_uv2_hub()) {
                mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
                shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
                mmioh.v = uv_read_local_mmr(mmr);
                enable = !!mmioh.s2.enable;
                base = mmioh.s2.base;
                m_io = mmioh.s2.m_io;
                n_io = mmioh.s2.n_io;
        } else
                return;

        if (enable) {
                max_pnode &= (1 << n_io) - 1;
                pr_info("UV: base:0x%lx shift:%d M_IO:%d N_IO:%d max_pnode:0x%x\n",
                        base, shift, m_io, n_io, max_pnode);
                map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
        } else {
                pr_info("UV: MMIOH disabled\n");
        }
}

static __init void map_low_mmrs(void)
{
        init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
        init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}
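
/* Ask the BIOS for the RTC clock frequency; fall back to a guess if the call fails. */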
static __init void uv_rtc_init(void)
{
        long status;
        u64 ticks_per_sec;

        status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
                                        &ticks_per_sec);
        if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
                printk(KERN_WARNING
                        "unable to determine platform RTC clock frequency, guessing.\n");
                /* BIOS gives wrong value for clock freq. so guess */
                sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
        } else
                sn_rtc_cycles_per_second = ticks_per_sec;
}

/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
        struct timer_list *timer = &uv_hub_info->scir.timer;
        unsigned char bits = uv_hub_info->scir.state;

        /* flip heartbeat bit */
        bits ^= SCIR_CPU_HEARTBEAT;

        /* is this cpu idle? */
        if (idle_cpu(raw_smp_processor_id()))
                bits &= ~SCIR_CPU_ACTIVITY;
        else
                bits |= SCIR_CPU_ACTIVITY;

        /* update system controller interface reg */
        uv_set_scir_bits(bits);

        /* enable next timer period */
        mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}
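
/*
 * Start the per-cpu SCIR heartbeat timer for this cpu; the loop also makes
 * sure the boot cpu (cpu 0) heartbeat is enabled.
 */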
static void uv_heartbeat_enable(int cpu)
{
        while (!uv_cpu_hub_info(cpu)->scir.enabled) {
                struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

                uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
                setup_timer(timer, uv_heartbeat, cpu);
                timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
                add_timer_on(timer, cpu);
                uv_cpu_hub_info(cpu)->scir.enabled = 1;

                /* also ensure that boot cpu is enabled */
                cpu = 0;
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static void uv_heartbeat_disable(int cpu)
{
        if (uv_cpu_hub_info(cpu)->scir.enabled) {
                uv_cpu_hub_info(cpu)->scir.enabled = 0;
                del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
        }
        uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
                              void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                uv_heartbeat_enable(cpu);
                break;
        case CPU_DOWN_PREPARE:
                uv_heartbeat_disable(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
        hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
        int cpu;

        if (is_uv_system())
                for_each_online_cpu(cpu)
                        uv_heartbeat_enable(cpu);
        return 0;
}
late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */

/* Direct Legacy VGA I/O traffic to designated IOH */
int uv_set_vga_state(struct pci_dev *pdev, bool decode,
                     unsigned int command_bits, u32 flags)
{
        int domain, bus, rc;

        PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
                        pdev->devfn, decode, command_bits, flags);

        if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
                return 0;

        if ((command_bits & PCI_COMMAND_IO) == 0)
                return 0;

        domain = pci_domain_nr(pdev->bus);
        bus = pdev->bus->number;

        rc = uv_bios_set_legacy_vga_target(decode, domain, bus);
        PR_DEVEL("vga decode %d %x:%x, rc: %d\n", decode, domain, bus, rc);

        return rc;
}

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * FIXME: hotplug not supported yet
 */
void uv_cpu_init(void)
{
        /* CPU 0 initialization will be done via uv_system_init. */
        if (!uv_blade_info)
                return;

        uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

        if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
                set_x2apic_extra_bits(uv_hub_info->pnode);
}
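
/*
 * Main UV platform setup, called once during boot: sizes the blade tables,
 * fills in the per-cpu hub info, maps the GRU/MMR/MMIOH spaces and registers
 * UV-specific handlers (NMI, SCIR heartbeat, legacy VGA redirection).
 */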
void __init uv_system_init(void)
{
        union uvh_rh_gam_config_mmr_u m_n_config;
        union uvh_node_id_u node_id;
        unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
        int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
        int gnode_extra, min_pnode = 999999, max_pnode = -1;
        unsigned long mmr_base, present, paddr;
        unsigned short pnode_mask;
        unsigned char n_lshift;
        char *hub = (is_uv1_hub() ? "UV1" :
                    (is_uv2_hub() ? "UV2" :
                                    "UV3"));

        pr_info("UV: Found %s hub\n", hub);
        map_low_mmrs();

        m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
        m_val = m_n_config.s.m_skt;
        n_val = m_n_config.s.n_skt;
        pnode_mask = (1 << n_val) - 1;
        n_lshift = get_n_lshift(m_val);
        mmr_base =
            uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
            ~UV_MMR_ENABLE;

        node_id.v = uv_read_local_mmr(UVH_NODE_ID);
        gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
        gnode_upper = ((unsigned long)gnode_extra << m_val);
        pr_info("UV: N:%d M:%d pnode_mask:0x%x gnode_upper/extra:0x%lx/0x%x n_lshift 0x%x\n",
                        n_val, m_val, pnode_mask, gnode_upper, gnode_extra,
                        n_lshift);

        pr_info("UV: global MMR base 0x%lx\n", mmr_base);

        for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
                uv_possible_blades +=
                        hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));

        /* uv_num_possible_blades() is really the hub count */
        pr_info("UV: Found %d blades, %d hubs\n",
                        is_uv1_hub() ? uv_num_possible_blades() :
                        (uv_num_possible_blades() + 1) / 2,
                        uv_num_possible_blades());

        bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
        uv_blade_info = kzalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_blade_info);

        for (blade = 0; blade < uv_num_possible_blades(); blade++)
                uv_blade_info[blade].memory_nid = -1;

        get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

        bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
        uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_node_to_blade);
        memset(uv_node_to_blade, 255, bytes);

        bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
        uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_cpu_to_blade);
        memset(uv_cpu_to_blade, 255, bytes);

        blade = 0;
        for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
                present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
                for (j = 0; j < 64; j++) {
                        if (!test_bit(j, &present))
                                continue;
                        pnode = (i * 64 + j) & pnode_mask;
                        uv_blade_info[blade].pnode = pnode;
                        uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
                        spin_lock_init(&uv_blade_info[blade].nmi_lock);
                        min_pnode = min(pnode, min_pnode);
                        max_pnode = max(pnode, max_pnode);
                        blade++;
                }
        }

        uv_bios_init();
        uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
                            &sn_region_size, &system_serial_number);
        uv_rtc_init();

        for_each_present_cpu(cpu) {
                int apicid = per_cpu(x86_cpu_to_apicid, cpu);

                nid = cpu_to_node(cpu);
                /*
                 * apic_pnode_shift must be set before calling uv_apicid_to_pnode();
                 */
                uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
                uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
                uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;

                uv_cpu_hub_info(cpu)->m_shift = 64 - m_val;
                uv_cpu_hub_info(cpu)->n_lshift = n_lshift;

                pnode = uv_apicid_to_pnode(apicid);
                blade = boot_pnode_to_blade(pnode);
                lcpu = uv_blade_info[blade].nr_possible_cpus;
                uv_blade_info[blade].nr_possible_cpus++;

                /* Any node on the blade, else will contain -1. */
                uv_blade_info[blade].memory_nid = nid;

                uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
                uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
                uv_cpu_hub_info(cpu)->m_val = m_val;
                uv_cpu_hub_info(cpu)->n_val = n_val;
                uv_cpu_hub_info(cpu)->numa_blade_id = blade;
                uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
                uv_cpu_hub_info(cpu)->pnode = pnode;
                uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
                uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
                uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
                uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
                uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
                uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
        }

        /* Add blade/pnode info for nodes without cpus */
        for_each_online_node(nid) {
                if (uv_node_to_blade[nid] >= 0)
                        continue;
                paddr = node_start_pfn(nid) << PAGE_SHIFT;
                pnode = uv_gpa_to_pnode(uv_soc_phys_ram_to_gpa(paddr));
                blade = boot_pnode_to_blade(pnode);
                uv_node_to_blade[nid] = blade;
        }

        map_gru_high(max_pnode);
        map_mmr_high(max_pnode);
        map_mmioh_high(min_pnode, max_pnode);

        uv_nmi_setup();
        uv_cpu_init();
        uv_scir_register_cpu_notifier();
        proc_mkdir("sgi_uv", NULL);

        /* register Legacy VGA I/O redirection handler */
        pci_register_set_vga_state(uv_set_vga_state);

        /*
         * For a kdump kernel the reset must be BOOT_ACPI, not BOOT_EFI, as
         * EFI is not enabled in the kdump kernel.
         */
        if (is_kdump_kernel())
                reboot_type = BOOT_ACPI;
}

apic_driver(apic_x2apic_uv_x);