setup.c

  1. /*
  2. *
  3. * Common boot and setup code.
  4. *
  5. * Copyright (C) 2001 PPC64 Team, IBM Corp
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; either version
  10. * 2 of the License, or (at your option) any later version.
  11. */
  12. #undef DEBUG
  13. #include <linux/config.h>
  14. #include <linux/module.h>
  15. #include <linux/string.h>
  16. #include <linux/sched.h>
  17. #include <linux/init.h>
  18. #include <linux/kernel.h>
  19. #include <linux/reboot.h>
  20. #include <linux/delay.h>
  21. #include <linux/initrd.h>
  22. #include <linux/ide.h>
  23. #include <linux/seq_file.h>
  24. #include <linux/ioport.h>
  25. #include <linux/console.h>
  26. #include <linux/version.h>
  27. #include <linux/tty.h>
  28. #include <linux/root_dev.h>
  29. #include <linux/notifier.h>
  30. #include <linux/cpu.h>
  31. #include <linux/unistd.h>
  32. #include <linux/serial.h>
  33. #include <linux/serial_8250.h>
  34. #include <asm/io.h>
  35. #include <asm/prom.h>
  36. #include <asm/processor.h>
  37. #include <asm/pgtable.h>
  38. #include <asm/bootinfo.h>
  39. #include <asm/smp.h>
  40. #include <asm/elf.h>
  41. #include <asm/machdep.h>
  42. #include <asm/iSeries/LparData.h>
  43. #include <asm/paca.h>
  44. #include <asm/ppcdebug.h>
  45. #include <asm/time.h>
  46. #include <asm/cputable.h>
  47. #include <asm/sections.h>
  48. #include <asm/btext.h>
  49. #include <asm/nvram.h>
  50. #include <asm/setup.h>
  51. #include <asm/system.h>
  52. #include <asm/rtas.h>
  53. #include <asm/iommu.h>
  54. #include <asm/serial.h>
  55. #include <asm/cache.h>
  56. #include <asm/page.h>
  57. #include <asm/mmu.h>
  58. #ifdef DEBUG
  59. #define DBG(fmt...) udbg_printf(fmt)
  60. #else
  61. #define DBG(fmt...)
  62. #endif
  63. /*
  64. * Here are some early debugging facilities. You can enable one,
  65. * but your kernel will then not boot on anything else if you do so
  66. */
  67. /* This one is for use on LPAR machines that support an HVC console
  68. * on vterm 0
  69. */
  70. extern void udbg_init_debug_lpar(void);
  71. /* This one is for use on Apple G5 machines
  72. */
  73. extern void udbg_init_pmac_realmode(void);
  74. /* That's RTAS panel debug */
  75. extern void call_rtas_display_status_delay(unsigned char c);
  76. /* Here's maple real mode debug */
  77. extern void udbg_init_maple_realmode(void);
  78. #define EARLY_DEBUG_INIT() do {} while(0)
  79. #if 0
  80. #define EARLY_DEBUG_INIT() udbg_init_debug_lpar()
  81. #define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
  82. #define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
  83. #define EARLY_DEBUG_INIT() \
  84. do { ppc_md.udbg_putc = call_rtas_display_status_delay; } while(0)
  85. #endif
  86. /* extern void *stab; */
  87. extern unsigned long klimit;
  88. extern void mm_init_ppc64(void);
  89. extern int idle_setup(void);
  90. extern void stab_initialize(unsigned long stab);
  91. extern void htab_initialize(void);
  92. extern void early_init_devtree(void *flat_dt);
  93. extern void unflatten_device_tree(void);
  94. extern void smp_release_cpus(void);
  95. unsigned long decr_overclock = 1;
  96. unsigned long decr_overclock_proc0 = 1;
  97. unsigned long decr_overclock_set = 0;
  98. unsigned long decr_overclock_proc0_set = 0;
  99. int have_of = 1;
  100. int boot_cpuid = 0;
  101. int boot_cpuid_phys = 0;
  102. dev_t boot_dev;
  103. u64 ppc64_pft_size;
  104. u64 ppc64_debug_switch;
  105. struct ppc64_caches ppc64_caches;
  106. EXPORT_SYMBOL_GPL(ppc64_caches);
  107. /*
  108. * These are used in binfmt_elf.c to put aux entries on the stack
  109. * for each elf executable being started.
  110. */
  111. int dcache_bsize;
  112. int icache_bsize;
  113. int ucache_bsize;
  114. /* The main machine-dep calls structure
  115. */
  116. struct machdep_calls ppc_md;
  117. EXPORT_SYMBOL(ppc_md);
  118. #ifdef CONFIG_MAGIC_SYSRQ
  119. unsigned long SYSRQ_KEY;
  120. #endif /* CONFIG_MAGIC_SYSRQ */
  121. static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);
  122. static struct notifier_block ppc64_panic_block = {
  123. .notifier_call = ppc64_panic_event,
  124. .priority = INT_MIN /* may not return; must be done last */
  125. };
  126. /*
  127. * Perhaps we can put the pmac screen_info[] here
  128. * on pmac as well so we don't need the ifdef's.
  129. * Until we get multiple-console support in here
  130. * that is. -- Cort
  131. * Maybe tie it to serial consoles, since this is really what
  132. * these processors use on existing boards. -- Dan
  133. */
  134. struct screen_info screen_info = {
  135. .orig_x = 0,
  136. .orig_y = 25,
  137. .orig_video_cols = 80,
  138. .orig_video_lines = 25,
  139. .orig_video_isVGA = 1,
  140. .orig_video_points = 16
  141. };
  142. /*
  143. * Initialize the PPCDBG state. Called before relocation has been enabled.
  144. */
  145. void __init ppcdbg_initialize(void)
  146. {
  147. ppc64_debug_switch = PPC_DEBUG_DEFAULT; /* | PPCDBG_BUSWALK | */
  148. /* PPCDBG_PHBINIT | PPCDBG_MM | PPCDBG_MMINIT | PPCDBG_TCEINIT | PPCDBG_TCE */;
  149. }
  150. /*
  151. * Early boot console based on udbg
  152. */
  153. static struct console udbg_console = {
  154. .name = "udbg",
  155. .write = udbg_console_write,
  156. .flags = CON_PRINTBUFFER,
  157. .index = -1,
  158. };
  159. static int early_console_initialized;
  160. void __init disable_early_printk(void)
  161. {
  162. if (!early_console_initialized)
  163. return;
  164. unregister_console(&udbg_console);
  165. early_console_initialized = 0;
  166. }
  167. #if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP)
  168. static int smt_enabled_cmdline;
  169. /* Look for ibm,smt-enabled OF option */
  170. static void check_smt_enabled(void)
  171. {
  172. struct device_node *dn;
  173. char *smt_option;
  174. /* Allow the command line to overrule the OF option */
  175. if (smt_enabled_cmdline)
  176. return;
  177. dn = of_find_node_by_path("/options");
  178. if (dn) {
  179. smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL);
  180. if (smt_option) {
  181. if (!strcmp(smt_option, "on"))
  182. smt_enabled_at_boot = 1;
  183. else if (!strcmp(smt_option, "off"))
  184. smt_enabled_at_boot = 0;
  185. }
  186. }
  187. }
  188. /* Look for smt-enabled= cmdline option */
  189. static int __init early_smt_enabled(char *p)
  190. {
  191. smt_enabled_cmdline = 1;
  192. if (!p)
  193. return 0;
  194. if (!strcmp(p, "on") || !strcmp(p, "1"))
  195. smt_enabled_at_boot = 1;
  196. else if (!strcmp(p, "off") || !strcmp(p, "0"))
  197. smt_enabled_at_boot = 0;
  198. return 0;
  199. }
  200. early_param("smt-enabled", early_smt_enabled);
  201. /**
  202. * setup_cpu_maps - initialize the following cpu maps:
  203. * cpu_possible_map
  204. * cpu_present_map
  205. * cpu_sibling_map
  206. *
  207. * Having the possible map set up early allows us to restrict allocations
  208. * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
  209. *
  210. * We do not initialize the online map here; cpus set their own bits in
  211. * cpu_online_map as they come up.
  212. *
  213. * This function is valid only for Open Firmware systems. finish_device_tree
  214. * must be called before using this.
  215. *
  216. * While we're here, we may as well set the "physical" cpu ids in the paca.
  217. */
  218. static void __init setup_cpu_maps(void)
  219. {
  220. struct device_node *dn = NULL;
  221. int cpu = 0;
  222. int swap_cpuid = 0;
  223. check_smt_enabled();
  224. while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
  225. u32 *intserv;
  226. int j, len = sizeof(u32), nthreads;
  227. intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
  228. &len);
  229. if (!intserv)
  230. intserv = (u32 *)get_property(dn, "reg", NULL);
  231. nthreads = len / sizeof(u32);
  232. for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
  233. cpu_set(cpu, cpu_present_map);
  234. set_hard_smp_processor_id(cpu, intserv[j]);
  235. if (intserv[j] == boot_cpuid_phys)
  236. swap_cpuid = cpu;
  237. cpu_set(cpu, cpu_possible_map);
  238. cpu++;
  239. }
  240. }
  241. /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
  242. * boot cpu is logical 0.
  243. */
  244. if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
  245. u32 tmp;
  246. tmp = get_hard_smp_processor_id(0);
  247. set_hard_smp_processor_id(0, boot_cpuid_phys);
  248. set_hard_smp_processor_id(swap_cpuid, tmp);
  249. }
  250. /*
  251. * On pSeries LPAR, we need to know how many cpus
  252. * could possibly be added to this partition.
  253. */
  254. if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
  255. (dn = of_find_node_by_path("/rtas"))) {
  256. int num_addr_cell, num_size_cell, maxcpus;
  257. unsigned int *ireg;
  258. num_addr_cell = prom_n_addr_cells(dn);
  259. num_size_cell = prom_n_size_cells(dn);
  260. ireg = (unsigned int *)
  261. get_property(dn, "ibm,lrdr-capacity", NULL);
  262. if (!ireg)
  263. goto out;
  264. maxcpus = ireg[num_addr_cell + num_size_cell];
  265. /* Double maxcpus for processors which have SMT capability */
  266. if (cpu_has_feature(CPU_FTR_SMT))
  267. maxcpus *= 2;
  268. if (maxcpus > NR_CPUS) {
  269. printk(KERN_WARNING
  270. "Partition configured for %d cpus, "
  271. "operating system maximum is %d.\n",
  272. maxcpus, NR_CPUS);
  273. maxcpus = NR_CPUS;
  274. } else
  275. printk(KERN_INFO "Partition configured for %d cpus.\n",
  276. maxcpus);
  277. for (cpu = 0; cpu < maxcpus; cpu++)
  278. cpu_set(cpu, cpu_possible_map);
  279. out:
  280. of_node_put(dn);
  281. }
  282. /*
  283. * Do the sibling map; assume only two threads per processor.
  284. */
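/* Logical ids were handed out consecutively per cpu node above, so with
 * two threads per core the sibling of logical cpu N is N ^ 1
 * (0 <-> 1, 2 <-> 3, ...), which is what the XOR below relies on.
 */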
  285. for_each_cpu(cpu) {
  286. cpu_set(cpu, cpu_sibling_map[cpu]);
  287. if (cpu_has_feature(CPU_FTR_SMT))
  288. cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
  289. }
  290. systemcfg->processorCount = num_present_cpus();
  291. }
  292. #endif /* defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) */
  293. #ifdef CONFIG_PPC_MULTIPLATFORM
  294. extern struct machdep_calls pSeries_md;
  295. extern struct machdep_calls pmac_md;
  296. extern struct machdep_calls maple_md;
  297. /* Ultimately, stuff them in an elf section like initcalls... */
  298. static struct machdep_calls __initdata *machines[] = {
  299. #ifdef CONFIG_PPC_PSERIES
  300. &pSeries_md,
  301. #endif /* CONFIG_PPC_PSERIES */
  302. #ifdef CONFIG_PPC_PMAC
  303. &pmac_md,
  304. #endif /* CONFIG_PPC_PMAC */
  305. #ifdef CONFIG_PPC_MAPLE
  306. &maple_md,
  307. #endif /* CONFIG_PPC_MAPLE */
  308. NULL
  309. };
  310. /*
  311. * Early initialization entry point. This is called by head.S
  312. * with MMU translation disabled. We rely on the "feature" of
  313. * the CPU that ignores the top 2 bits of the address in real
  314. * mode so we can access kernel globals normally provided we
  315. * only toy with things in the RMO region. From here, we do
  316. * some early parsing of the device-tree to set up our LMB
  317. * data structures, and allocate & initialize the hash table
  318. * and segment tables so we can start running with translation
  319. * enabled.
  320. *
  321. * It is this function which will call the probe() callback of
  322. * the various platform types and copy the matching one to the
  323. * global ppc_md structure. Your platform can eventually do
  324. * some very early initializations from the probe() routine, but
  325. * this is not recommended, be very careful as, for example, the
  326. * device-tree is not accessible via normal means at this point.
  327. */
  328. void __init early_setup(unsigned long dt_ptr)
  329. {
  330. struct paca_struct *lpaca = get_paca();
  331. static struct machdep_calls **mach;
  332. /*
  333. * Enable early debugging if any specified (see top of
  334. * this file)
  335. */
  336. EARLY_DEBUG_INIT();
  337. DBG(" -> early_setup()\n");
  338. /*
  339. * Fill the default DBG level (do we want to keep
  340. * that old mechanism around forever?)
  341. */
  342. ppcdbg_initialize();
  343. /*
  344. * Do early initializations using the flattened device
  345. * tree, like retrieving the physical memory map or
  346. * calculating/retrieving the hash table size
  347. */
  348. early_init_devtree(__va(dt_ptr));
  349. /*
  350. * Iterate all ppc_md structures until we find the proper
  351. * one for the current machine type
  352. */
  353. DBG("Probing machine type for platform %x...\n",
  354. systemcfg->platform);
  355. for (mach = machines; *mach; mach++) {
  356. if ((*mach)->probe(systemcfg->platform))
  357. break;
  358. }
  359. /* What can we do if we didn't find one? */
  360. if (*mach == NULL) {
  361. DBG("No suitable machine found !\n");
  362. for (;;);
  363. }
  364. ppc_md = **mach;
  365. /* our udbg callbacks got overridden by the above, let's put them
  366. * back in. Ultimately, I want those things to be split from the
  367. * main ppc_md
  368. */
  369. EARLY_DEBUG_INIT();
  370. DBG("Found, Initializing memory management...\n");
  371. /*
  372. * Initialize stab / SLB management
  373. */
  374. stab_initialize(lpaca->stab_real);
  375. /*
  376. * Initialize the MMU Hash table and create the linear mapping
  377. * of memory
  378. */
  379. htab_initialize();
  380. DBG(" <- early_setup()\n");
  381. }
  382. /*
  383. * Initialize some remaining members of the ppc64_caches and systemcfg structures
  384. * (at least until we get rid of them completely). This is mostly
  385. * cache information about the CPU that will be used by cache flush
  386. * routines and/or provided to userland
  387. */
  388. static void __init initialize_cache_info(void)
  389. {
  390. struct device_node *np;
  391. unsigned long num_cpus = 0;
  392. DBG(" -> initialize_cache_info()\n");
  393. for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
  394. num_cpus += 1;
  395. /* We're assuming *all* of the CPUs have the same
  396. * d-cache and i-cache sizes... -Peter
  397. */
  398. if ( num_cpus == 1 ) {
  399. u32 *sizep, *lsizep;
  400. u32 size, lsize;
  401. const char *dc, *ic;
  402. /* Then read cache information */
  403. if (systemcfg->platform == PLATFORM_POWERMAC) {
  404. dc = "d-cache-block-size";
  405. ic = "i-cache-block-size";
  406. } else {
  407. dc = "d-cache-line-size";
  408. ic = "i-cache-line-size";
  409. }
  410. size = 0;
  411. lsize = cur_cpu_spec->dcache_bsize;
  412. sizep = (u32 *)get_property(np, "d-cache-size", NULL);
  413. if (sizep != NULL)
  414. size = *sizep;
  415. lsizep = (u32 *) get_property(np, dc, NULL);
  416. if (lsizep != NULL)
  417. lsize = *lsizep;
  418. if (sizep == 0 || lsizep == 0)
  419. DBG("Argh, can't find dcache properties ! "
  420. "sizep: %p, lsizep: %p\n", sizep, lsizep);
  421. systemcfg->dcache_size = ppc64_caches.dsize = size;
  422. systemcfg->dcache_line_size =
  423. ppc64_caches.dline_size = lsize;
  424. ppc64_caches.log_dline_size = __ilog2(lsize);
  425. ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
  426. size = 0;
  427. lsize = cur_cpu_spec->icache_bsize;
  428. sizep = (u32 *)get_property(np, "i-cache-size", NULL);
  429. if (sizep != NULL)
  430. size = *sizep;
  431. lsizep = (u32 *)get_property(np, ic, NULL);
  432. if (lsizep != NULL)
  433. lsize = *lsizep;
  434. if (sizep == 0 || lsizep == 0)
  435. DBG("Argh, can't find icache properties ! "
  436. "sizep: %p, lsizep: %p\n", sizep, lsizep);
  437. systemcfg->icache_size = ppc64_caches.isize = size;
  438. systemcfg->icache_line_size =
  439. ppc64_caches.iline_size = lsize;
  440. ppc64_caches.log_iline_size = __ilog2(lsize);
  441. ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
  442. }
  443. }
  444. /* Add an eye catcher and the systemcfg layout version number */
  445. strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
  446. systemcfg->version.major = SYSTEMCFG_MAJOR;
  447. systemcfg->version.minor = SYSTEMCFG_MINOR;
  448. systemcfg->processor = mfspr(SPRN_PVR);
  449. DBG(" <- initialize_cache_info()\n");
  450. }
  451. static void __init check_for_initrd(void)
  452. {
  453. #ifdef CONFIG_BLK_DEV_INITRD
  454. u64 *prop;
  455. DBG(" -> check_for_initrd()\n");
  456. prop = (u64 *)get_property(of_chosen, "linux,initrd-start", NULL);
  457. if (prop != NULL) {
  458. initrd_start = (unsigned long)__va(*prop);
  459. prop = (u64 *)get_property(of_chosen, "linux,initrd-end", NULL);
  460. if (prop != NULL) {
  461. initrd_end = (unsigned long)__va(*prop);
  462. initrd_below_start_ok = 1;
  463. } else
  464. initrd_start = 0;
  465. }
  466. /* If we were passed an initrd, set the ROOT_DEV properly if the values
  467. * look sensible. If not, clear initrd reference.
  468. */
  469. if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
  470. initrd_end > initrd_start)
  471. ROOT_DEV = Root_RAM0;
  472. else
  473. initrd_start = initrd_end = 0;
  474. if (initrd_start)
  475. printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
  476. DBG(" <- check_for_initrd()\n");
  477. #endif /* CONFIG_BLK_DEV_INITRD */
  478. }
  479. #endif /* CONFIG_PPC_MULTIPLATFORM */
  480. /*
  481. * Do some initial setup of the system. The parameters are those which
  482. * were passed in from the bootloader.
  483. */
  484. void __init setup_system(void)
  485. {
  486. DBG(" -> setup_system()\n");
  487. #ifdef CONFIG_PPC_ISERIES
  488. /* pSeries systems are identified in prom.c via OF. */
  489. if (itLpNaca.xLparInstalled == 1)
  490. systemcfg->platform = PLATFORM_ISERIES_LPAR;
  491. ppc_md.init_early();
  492. #else /* CONFIG_PPC_ISERIES */
  493. /*
  494. * Unflatten the device-tree passed by prom_init or kexec
  495. */
  496. unflatten_device_tree();
  497. /*
  498. * Fill the ppc64_caches & systemcfg structures with information
  499. * retrieved from the device-tree. This needs to be called before
  500. * finish_device_tree() since the latter requires some of the
  501. * information filled in here to properly parse the interrupt
  502. * tree.
  503. * It also sets up the cache line sizes, which allows us to call
  504. * routines like flush_icache_range (used by the hash init
  505. * later on).
  506. */
  507. initialize_cache_info();
  508. #ifdef CONFIG_PPC_RTAS
  509. /*
  510. * Initialize RTAS if available
  511. */
  512. rtas_initialize();
  513. #endif /* CONFIG_PPC_RTAS */
  514. /*
  515. * Check if we have an initrd provided via the device-tree
  516. */
  517. check_for_initrd();
  518. /*
  519. * Do some platform-specific early initializations, which include
  520. * setting up the hash table pointers. It also sets up some interrupt-mapping
  521. * related options that will be used by finish_device_tree()
  522. */
  523. ppc_md.init_early();
  524. /*
  525. * "Finish" the device-tree, that is do the actual parsing of
  526. * some of the properties like the interrupt map
  527. */
  528. finish_device_tree();
  529. /*
  530. * Initialize xmon
  531. */
  532. #ifdef CONFIG_XMON_DEFAULT
  533. xmon_init();
  534. #endif
  535. /*
  536. * Register early console
  537. */
  538. early_console_initialized = 1;
  539. register_console(&udbg_console);
  540. /* Save unparsed command line copy for /proc/cmdline */
  541. strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
  542. parse_early_param();
  543. #endif /* !CONFIG_PPC_ISERIES */
  544. #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)
  545. /*
  546. * iSeries has already initialized the cpu maps at this point.
  547. */
  548. setup_cpu_maps();
  549. /* Release secondary cpus out of their spinloops at 0x60 now that
  550. * we can map physical -> logical CPU ids
  551. */
  552. smp_release_cpus();
  553. #endif /* defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) */
  554. printk("Starting Linux PPC64 %s\n", UTS_RELEASE);
  555. printk("-----------------------------------------------------\n");
  556. printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
  557. printk("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
  558. printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller);
  559. printk("systemcfg = 0x%p\n", systemcfg);
  560. printk("systemcfg->platform = 0x%x\n", systemcfg->platform);
  561. printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount);
  562. printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
  563. printk("ppc64_caches.dcache_line_size = 0x%x\n",
  564. ppc64_caches.dline_size);
  565. printk("ppc64_caches.icache_line_size = 0x%x\n",
  566. ppc64_caches.iline_size);
  567. printk("htab_address = 0x%p\n", htab_address);
  568. printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
  569. printk("-----------------------------------------------------\n");
  570. mm_init_ppc64();
  571. DBG(" <- setup_system()\n");
  572. }
  573. void machine_restart(char *cmd)
  574. {
  575. if (ppc_md.nvram_sync)
  576. ppc_md.nvram_sync();
  577. ppc_md.restart(cmd);
  578. }
  579. EXPORT_SYMBOL(machine_restart);
  580. void machine_power_off(void)
  581. {
  582. if (ppc_md.nvram_sync)
  583. ppc_md.nvram_sync();
  584. ppc_md.power_off();
  585. }
  586. EXPORT_SYMBOL(machine_power_off);
  587. void machine_halt(void)
  588. {
  589. if (ppc_md.nvram_sync)
  590. ppc_md.nvram_sync();
  591. ppc_md.halt();
  592. }
  593. EXPORT_SYMBOL(machine_halt);
  594. unsigned long ppc_proc_freq;
  595. unsigned long ppc_tb_freq;
  596. static int ppc64_panic_event(struct notifier_block *this,
  597. unsigned long event, void *ptr)
  598. {
  599. ppc_md.panic((char *)ptr); /* May not return */
  600. return NOTIFY_DONE;
  601. }
  602. #ifdef CONFIG_SMP
  603. DEFINE_PER_CPU(unsigned int, pvr);
  604. #endif
  605. static int show_cpuinfo(struct seq_file *m, void *v)
  606. {
  607. unsigned long cpu_id = (unsigned long)v - 1;
  608. unsigned int pvr;
  609. unsigned short maj;
  610. unsigned short min;
  611. if (cpu_id == NR_CPUS) {
  612. seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
  613. if (ppc_md.get_cpuinfo != NULL)
  614. ppc_md.get_cpuinfo(m);
  615. return 0;
  616. }
  617. /* We only show online cpus: disable preempt (overzealous, I
  618. * know) to prevent the cpu going down. */
  619. preempt_disable();
  620. if (!cpu_online(cpu_id)) {
  621. preempt_enable();
  622. return 0;
  623. }
  624. #ifdef CONFIG_SMP
  625. pvr = per_cpu(pvr, cpu_id);
  626. #else
  627. pvr = mfspr(SPRN_PVR);
  628. #endif
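/* The low halfword of the PVR holds the revision: major level in
 * bits 15-8, minor level in bits 7-0.
 */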
  629. maj = (pvr >> 8) & 0xFF;
  630. min = pvr & 0xFF;
  631. seq_printf(m, "processor\t: %lu\n", cpu_id);
  632. seq_printf(m, "cpu\t\t: ");
  633. if (cur_cpu_spec->pvr_mask)
  634. seq_printf(m, "%s", cur_cpu_spec->cpu_name);
  635. else
  636. seq_printf(m, "unknown (%08x)", pvr);
  637. #ifdef CONFIG_ALTIVEC
  638. if (cpu_has_feature(CPU_FTR_ALTIVEC))
  639. seq_printf(m, ", altivec supported");
  640. #endif /* CONFIG_ALTIVEC */
  641. seq_printf(m, "\n");
  642. /*
  643. * Assume here that all clock rates are the same in a
  644. * smp system. -- Cort
  645. */
  646. seq_printf(m, "clock\t\t: %lu.%06luMHz\n", ppc_proc_freq / 1000000,
  647. ppc_proc_freq % 1000000);
  648. seq_printf(m, "revision\t: %hd.%hd\n\n", maj, min);
  649. preempt_enable();
  650. return 0;
  651. }
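/* The iterator below hands show_cpuinfo the position plus one, so that a
 * valid first entry is never the NULL end marker; show_cpuinfo subtracts
 * one again, and a cpu_id equal to NR_CPUS selects the trailing summary.
 */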
  652. static void *c_start(struct seq_file *m, loff_t *pos)
  653. {
  654. return *pos <= NR_CPUS ? (void *)((*pos)+1) : NULL;
  655. }
  656. static void *c_next(struct seq_file *m, void *v, loff_t *pos)
  657. {
  658. ++*pos;
  659. return c_start(m, pos);
  660. }
  661. static void c_stop(struct seq_file *m, void *v)
  662. {
  663. }
  664. struct seq_operations cpuinfo_op = {
  665. .start =c_start,
  666. .next = c_next,
  667. .stop = c_stop,
  668. .show = show_cpuinfo,
  669. };
  670. /*
  671. * These three variables are used to save values passed to us by prom_init()
  672. * via the device tree. The TCE variables are needed because with a memory_limit
  673. * in force we may need to explicitly map the TCE area at the top of RAM.
  674. */
  675. unsigned long memory_limit;
  676. unsigned long tce_alloc_start;
  677. unsigned long tce_alloc_end;
  678. #ifdef CONFIG_PPC_ISERIES
  679. /*
  680. * On iSeries we just parse the mem=X option from the command line.
  681. * On pSeries it's a bit more complicated, see prom_init_mem()
  682. */
  683. static int __init early_parsemem(char *p)
  684. {
  685. if (!p)
  686. return 0;
  687. memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
  688. return 0;
  689. }
  690. early_param("mem", early_parsemem);
  691. #endif /* CONFIG_PPC_ISERIES */
  692. #ifdef CONFIG_PPC_MULTIPLATFORM
  693. static int __init set_preferred_console(void)
  694. {
  695. struct device_node *prom_stdout = NULL;
  696. char *name;
  697. u32 *spd;
  698. int offset = 0;
  699. DBG(" -> set_preferred_console()\n");
  700. /* The user has requested a console so this is already set up. */
  701. if (strstr(saved_command_line, "console=")) {
  702. DBG(" console was specified !\n");
  703. return -EBUSY;
  704. }
  705. if (!of_chosen) {
  706. DBG(" of_chosen is NULL !\n");
  707. return -ENODEV;
  708. }
  709. /* We are getting a weird phandle from OF ... */
  710. /* ... So use the full path instead */
  711. name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
  712. if (name == NULL) {
  713. DBG(" no linux,stdout-path !\n");
  714. return -ENODEV;
  715. }
  716. prom_stdout = of_find_node_by_path(name);
  717. if (!prom_stdout) {
  718. DBG(" can't find stdout package %s !\n", name);
  719. return -ENODEV;
  720. }
  721. DBG("stdout is %s\n", prom_stdout->full_name);
  722. name = (char *)get_property(prom_stdout, "name", NULL);
  723. if (!name) {
  724. DBG(" stdout package has no name !\n");
  725. goto not_found;
  726. }
  727. spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);
  728. if (0)
  729. ;
  730. #ifdef CONFIG_SERIAL_8250_CONSOLE
  731. else if (strcmp(name, "serial") == 0) {
  732. int i;
  733. u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
  734. if (i > 8) {
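/* Map the legacy UART I/O port (second "reg" cell) to a ttyS index;
 * 0x3f8 and 0x2f8 are the classic COM1/COM2 addresses.
 */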
  735. switch (reg[1]) {
  736. case 0x3f8:
  737. offset = 0;
  738. break;
  739. case 0x2f8:
  740. offset = 1;
  741. break;
  742. case 0x898:
  743. offset = 2;
  744. break;
  745. case 0x890:
  746. offset = 3;
  747. break;
  748. default:
  749. /* We don't recognise the serial port */
  750. goto not_found;
  751. }
  752. }
  753. }
  754. #endif /* CONFIG_SERIAL_8250_CONSOLE */
  755. #ifdef CONFIG_PPC_PSERIES
  756. else if (strcmp(name, "vty") == 0) {
  757. u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL);
  758. char *compat = (char *)get_property(prom_stdout, "compatible", NULL);
  759. if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) {
  760. /* Host Virtual Serial Interface */
  761. int offset;
  762. switch (reg[0]) {
  763. case 0x30000000:
  764. offset = 0;
  765. break;
  766. case 0x30000001:
  767. offset = 1;
  768. break;
  769. default:
  770. goto not_found;
  771. }
  772. of_node_put(prom_stdout);
  773. DBG("Found hvsi console at offset %d\n", offset);
  774. return add_preferred_console("hvsi", offset, NULL);
  775. } else {
  776. /* pSeries LPAR virtual console */
  777. of_node_put(prom_stdout);
  778. DBG("Found hvc console\n");
  779. return add_preferred_console("hvc", 0, NULL);
  780. }
  781. }
  782. #endif /* CONFIG_PPC_PSERIES */
  783. #ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
  784. else if (strcmp(name, "ch-a") == 0)
  785. offset = 0;
  786. else if (strcmp(name, "ch-b") == 0)
  787. offset = 1;
  788. #endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
  789. else
  790. goto not_found;
  791. of_node_put(prom_stdout);
  792. DBG("Found serial console at ttyS%d\n", offset);
  793. if (spd) {
  794. static char __initdata opt[16];
  795. sprintf(opt, "%d", *spd);
  796. return add_preferred_console("ttyS", offset, opt);
  797. } else
  798. return add_preferred_console("ttyS", offset, NULL);
  799. not_found:
  800. DBG("No preferred console found !\n");
  801. of_node_put(prom_stdout);
  802. return -ENODEV;
  803. }
  804. console_initcall(set_preferred_console);
  805. #endif /* CONFIG_PPC_MULTIPLATFORM */
  806. #ifdef CONFIG_IRQSTACKS
  807. static void __init irqstack_early_init(void)
  808. {
  809. unsigned int i;
  810. /*
  811. * Interrupt stacks must be under 256MB; we cannot afford to take
  812. * SLB misses on them.
  813. */
  814. for_each_cpu(i) {
  815. softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
  816. THREAD_SIZE, 0x10000000));
  817. hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
  818. THREAD_SIZE, 0x10000000));
  819. }
  820. }
  821. #else
  822. #define irqstack_early_init()
  823. #endif
  824. /*
  825. * Stack space used when we detect a bad kernel stack pointer, and
  826. * early in SMP boots before relocation is enabled.
  827. */
  828. static void __init emergency_stack_init(void)
  829. {
  830. unsigned long limit;
  831. unsigned int i;
  832. /*
  833. * Emergency stacks must be under 256MB; we cannot afford to take
  834. * SLB misses on them. The ABI also requires them to be 128-byte
  835. * aligned.
  836. *
  837. * Since we use these as temporary stacks during secondary CPU
  838. * bringup, we need to get at them in real mode. This means they
  839. * must also be within the RMO region.
  840. */
  841. limit = min(0x10000000UL, lmb.rmo_size);
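/* emergency_sp points at the top of each allocated page, since the
 * stack grows downwards from there.
 */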
  842. for_each_cpu(i)
  843. paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
  844. limit)) + PAGE_SIZE;
  845. }
  846. /*
  847. * Called from setup_arch to initialize the bitmap of available
  848. * syscalls in the systemcfg page
  849. */
  850. void __init setup_syscall_map(void)
  851. {
  852. unsigned int i, count64 = 0, count32 = 0;
  853. extern unsigned long *sys_call_table;
  854. extern unsigned long *sys_call_table32;
  855. extern unsigned long sys_ni_syscall;
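/* Each map is an array of 32-bit words: syscall i lives in word i >> 5 and
 * is flagged with an MSB-first mask, e.g. syscall 37 sets
 * 0x80000000 >> (37 & 0x1f) == 0x04000000 in word 1.
 */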
  856. for (i = 0; i < __NR_syscalls; i++) {
  857. if (sys_call_table[i] == sys_ni_syscall)
  858. continue;
  859. count64++;
  860. systemcfg->syscall_map_64[i >> 5] |= 0x80000000UL >> (i & 0x1f);
  861. }
  862. for (i = 0; i < __NR_syscalls; i++) {
  863. if (sys_call_table32[i] == sys_ni_syscall)
  864. continue;
  865. count32++;
  866. systemcfg->syscall_map_32[i >> 5] |= 0x80000000UL >> (i & 0x1f);
  867. }
  868. printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n",
  869. count32, count64);
  870. }
  871. /*
  872. * Called from start_kernel, after lock_kernel has been called.
  873. * Initializes bootmem, which is used to manage page allocation until
  874. * mem_init is called.
  875. */
  876. void __init setup_arch(char **cmdline_p)
  877. {
  878. extern void do_init_bootmem(void);
  879. ppc64_boot_msg(0x12, "Setup Arch");
  880. *cmdline_p = cmd_line;
  881. /*
  882. * Set cache line size based on type of cpu as a default.
  883. * Systems with OF can look in the properties on the cpu node(s)
  884. * for a possibly more accurate value.
  885. */
  886. dcache_bsize = ppc64_caches.dline_size;
  887. icache_bsize = ppc64_caches.iline_size;
  888. /* reboot on panic */
  889. panic_timeout = 180;
  890. if (ppc_md.panic)
  891. notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);
  892. init_mm.start_code = PAGE_OFFSET;
  893. init_mm.end_code = (unsigned long) _etext;
  894. init_mm.end_data = (unsigned long) _edata;
  895. init_mm.brk = klimit;
  896. irqstack_early_init();
  897. emergency_stack_init();
  898. /* set up the bootmem stuff with available memory */
  899. do_init_bootmem();
  900. /* initialize the syscall map in systemcfg */
  901. setup_syscall_map();
  902. ppc_md.setup_arch();
  903. /* Select the correct idle loop for the platform. */
  904. idle_setup();
  905. paging_init();
  906. ppc64_boot_msg(0x15, "Setup Done");
  907. }
  908. /* ToDo: do something useful if ppc_md is not yet setup. */
  909. #define PPC64_LINUX_FUNCTION 0x0f000000
  910. #define PPC64_IPL_MESSAGE 0xc0000000
  911. #define PPC64_TERM_MESSAGE 0xb0000000
  912. #define PPC64_ATTN_MESSAGE 0xa0000000
  913. #define PPC64_DUMP_MESSAGE 0xd0000000
  914. static void ppc64_do_msg(unsigned int src, const char *msg)
  915. {
  916. if (ppc_md.progress) {
  917. char buf[32];
  918. sprintf(buf, "%08x \n", src);
  919. ppc_md.progress(buf, 0);
  920. sprintf(buf, "%-16s", msg);
  921. ppc_md.progress(buf, 0);
  922. }
  923. }
  924. /* Print a boot progress message. */
  925. void ppc64_boot_msg(unsigned int src, const char *msg)
  926. {
  927. ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
  928. printk("[boot]%04x %s\n", src, msg);
  929. }
  930. /* Print a termination message (print only -- does not stop the kernel) */
  931. void ppc64_terminate_msg(unsigned int src, const char *msg)
  932. {
  933. ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
  934. printk("[terminate]%04x %s\n", src, msg);
  935. }
  936. /* Print something that needs attention (device error, etc) */
  937. void ppc64_attention_msg(unsigned int src, const char *msg)
  938. {
  939. ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_ATTN_MESSAGE|src, msg);
  940. printk("[attention]%04x %s\n", src, msg);
  941. }
  942. /* Print a dump progress message. */
  943. void ppc64_dump_msg(unsigned int src, const char *msg)
  944. {
  945. ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_DUMP_MESSAGE|src, msg);
  946. printk("[dump]%04x %s\n", src, msg);
  947. }
  948. int set_spread_lpevents( char * str )
  949. {
  950. /* The parameter is the number of processors to share in processing lp events */
  951. unsigned long i;
  952. unsigned long val = simple_strtoul( str, NULL, 0 );
  953. if ( ( val > 0 ) && ( val <= NR_CPUS ) ) {
  954. for ( i=1; i<val; ++i )
  955. paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;
  956. printk("lpevent processing spread over %ld processors\n", val);
  957. }
  958. else
  959. printk("invalid spread_lpevents %ld\n", val);
  960. return 1;
  961. }
  962. /* This should only be called on processor 0 during calibrate decr */
  963. void setup_default_decr(void)
  964. {
  965. struct paca_struct *lpaca = get_paca();
  966. if ( decr_overclock_set && !decr_overclock_proc0_set )
  967. decr_overclock_proc0 = decr_overclock;
  968. lpaca->default_decr = tb_ticks_per_jiffy / decr_overclock_proc0;
  969. lpaca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy;
  970. }
  971. int set_decr_overclock_proc0( char * str )
  972. {
  973. unsigned long val = simple_strtoul( str, NULL, 0 );
  974. if ( ( val >= 1 ) && ( val <= 48 ) ) {
  975. decr_overclock_proc0_set = 1;
  976. decr_overclock_proc0 = val;
  977. printk("proc 0 decrementer overclock factor of %ld\n", val);
  978. }
  979. else
  980. printk("invalid proc 0 decrementer overclock factor of %ld\n", val);
  981. return 1;
  982. }
  983. int set_decr_overclock( char * str )
  984. {
  985. unsigned long val = simple_strtoul( str, NULL, 0 );
  986. if ( ( val >= 1 ) && ( val <= 48 ) ) {
  987. decr_overclock_set = 1;
  988. decr_overclock = val;
  989. printk("decrementer overclock factor of %ld\n", val);
  990. }
  991. else
  992. printk("invalid decrementer overclock factor of %ld\n", val);
  993. return 1;
  994. }
  995. __setup("spread_lpevents=", set_spread_lpevents );
  996. __setup("decr_overclock_proc0=", set_decr_overclock_proc0 );
  997. __setup("decr_overclock=", set_decr_overclock );
  998. #ifndef CONFIG_PPC_ISERIES
  999. /*
  1000. * This function can be used by platforms to "find" legacy serial ports.
  1001. * It works for "serial" nodes under an "isa" node, and will try to
  1002. * respect the "ibm,aix-loc" property if any. It works with up to 8
  1003. * ports.
  1004. */
  1005. #define MAX_LEGACY_SERIAL_PORTS 8
  1006. static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
  1007. static unsigned int old_serial_count;
  1008. void __init generic_find_legacy_serial_ports(u64 *physport,
  1009. unsigned int *default_speed)
  1010. {
  1011. struct device_node *np;
  1012. u32 *sizeprop;
  1013. struct isa_reg_property {
  1014. u32 space;
  1015. u32 address;
  1016. u32 size;
  1017. };
  1018. struct pci_reg_property {
  1019. struct pci_address addr;
  1020. u32 size_hi;
  1021. u32 size_lo;
  1022. };
  1023. DBG(" -> generic_find_legacy_serial_port()\n");
  1024. *physport = 0;
  1025. if (default_speed)
  1026. *default_speed = 0;
  1027. np = of_find_node_by_path("/");
  1028. if (!np)
  1029. return;
  1030. /* First fill our array */
  1031. for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
  1032. struct device_node *isa, *pci;
  1033. struct isa_reg_property *reg;
  1034. unsigned long phys_size, addr_size, io_base;
  1035. u32 *rangesp;
  1036. u32 *interrupts, *clk, *spd;
  1037. char *typep;
  1038. int index, rlen, rentsize;
  1039. /* Ok, first check if it's under an "isa" parent */
  1040. isa = of_get_parent(np);
  1041. if (!isa || strcmp(isa->name, "isa")) {
  1042. DBG("%s: no isa parent found\n", np->full_name);
  1043. continue;
  1044. }
  1045. /* Now look for an "ibm,aix-loc" property that gives us ordering
  1046. * if any...
  1047. */
  1048. typep = (char *)get_property(np, "ibm,aix-loc", NULL);
  1049. /* Get the ISA port number */
  1050. reg = (struct isa_reg_property *)get_property(np, "reg", NULL);
  1051. if (reg == NULL)
  1052. goto next_port;
  1053. /* We assume the interrupt number isn't translated ... */
  1054. interrupts = (u32 *)get_property(np, "interrupts", NULL);
  1055. /* get clock freq. if present */
  1056. clk = (u32 *)get_property(np, "clock-frequency", NULL);
  1057. /* get default speed if present */
  1058. spd = (u32 *)get_property(np, "current-speed", NULL);
  1059. /* Default to locate at end of array */
  1060. index = old_serial_count; /* end of the array by default */
  1061. /* If we have a location index, then use it */
  1062. if (typep && *typep == 'S') {
  1063. index = simple_strtol(typep+1, NULL, 0) - 1;
  1064. /* if index is out of range, use end of array instead */
  1065. if (index >= MAX_LEGACY_SERIAL_PORTS)
  1066. index = old_serial_count;
  1067. /* if our index is still out of range, that means the
  1068. * array is full; we could scan for a free slot but that
  1069. * makes little sense to bother with, so just skip the port
  1070. */
  1071. if (index >= MAX_LEGACY_SERIAL_PORTS)
  1072. goto next_port;
  1073. if (index >= old_serial_count)
  1074. old_serial_count = index + 1;
  1075. /* Check if there is a port which already claimed our slot */
  1076. if (serial_ports[index].iobase != 0) {
  1077. /* if we still have some room, move it, else override */
  1078. if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) {
  1079. DBG("Moved legacy port %d -> %d\n", index,
  1080. old_serial_count);
  1081. serial_ports[old_serial_count++] =
  1082. serial_ports[index];
  1083. } else {
  1084. DBG("Replacing legacy port %d\n", index);
  1085. }
  1086. }
  1087. }
  1088. if (index >= MAX_LEGACY_SERIAL_PORTS)
  1089. goto next_port;
  1090. if (index >= old_serial_count)
  1091. old_serial_count = index + 1;
  1092. /* Now fill the entry */
  1093. memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port));
  1094. serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16;
  1095. serial_ports[index].iobase = reg->address;
  1096. serial_ports[index].irq = interrupts ? interrupts[0] : 0;
  1097. serial_ports[index].flags = ASYNC_BOOT_AUTOCONF;
  1098. DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n",
  1099. index,
  1100. serial_ports[index].iobase,
  1101. serial_ports[index].irq,
  1102. serial_ports[index].uartclk);
  1103. /* Get phys address of IO reg for port 1 */
  1104. if (index != 0)
  1105. goto next_port;
  1106. pci = of_get_parent(isa);
  1107. if (!pci) {
  1108. DBG("%s: no pci parent found\n", np->full_name);
  1109. goto next_port;
  1110. }
  1111. rangesp = (u32 *)get_property(pci, "ranges", &rlen);
  1112. if (rangesp == NULL) {
  1113. of_node_put(pci);
  1114. goto next_port;
  1115. }
  1116. rlen /= 4;
  1117. /* we need the #size-cells of the PCI bridge node itself */
  1118. phys_size = 1;
  1119. sizeprop = (u32 *)get_property(pci, "#size-cells", NULL);
  1120. if (sizeprop != NULL)
  1121. phys_size = *sizeprop;
  1122. /* we need the parent #addr-cells */
  1123. addr_size = prom_n_addr_cells(pci);
  1124. rentsize = 3 + addr_size + phys_size;
  1125. io_base = 0;
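/* Each "ranges" entry is <child addr (3 cells)> <parent addr (addr_size
 * cells)> <size (phys_size cells)>; bits 24-25 of the first child cell
 * give the PCI address space code, 1 meaning I/O space, and the parent
 * (CPU physical) address starts at cell 3.
 */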
  1126. for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) {
  1127. if (((rangesp[0] >> 24) & 0x3) != 1)
  1128. continue; /* not IO space */
  1129. io_base = rangesp[3];
  1130. if (addr_size == 2)
  1131. io_base = (io_base << 32) | rangesp[4];
  1132. }
  1133. if (io_base != 0) {
  1134. *physport = io_base + reg->address;
  1135. if (default_speed && spd)
  1136. *default_speed = *spd;
  1137. }
  1138. of_node_put(pci);
  1139. next_port:
  1140. of_node_put(isa);
  1141. }
  1142. DBG(" <- generic_find_legacy_serial_port()\n");
  1143. }
  1144. static struct platform_device serial_device = {
  1145. .name = "serial8250",
  1146. .id = 0,
  1147. .dev = {
  1148. .platform_data = serial_ports,
  1149. },
  1150. };
  1151. static int __init serial_dev_init(void)
  1152. {
  1153. return platform_device_register(&serial_device);
  1154. }
  1155. arch_initcall(serial_dev_init);
  1156. #endif /* CONFIG_PPC_ISERIES */
  1157. int check_legacy_ioport(unsigned long base_port)
  1158. {
  1159. if (ppc_md.check_legacy_ioport == NULL)
  1160. return 0;
  1161. return ppc_md.check_legacy_ioport(base_port);
  1162. }
  1163. EXPORT_SYMBOL(check_legacy_ioport);
  1164. #ifdef CONFIG_XMON
  1165. static int __init early_xmon(char *p)
  1166. {
  1167. /* ensure xmon is enabled */
  1168. if (p) {
  1169. if (strncmp(p, "on", 2) == 0)
  1170. xmon_init();
  1171. if (strncmp(p, "early", 5) != 0)
  1172. return 0;
  1173. }
  1174. xmon_init();
  1175. debugger(NULL);
  1176. return 0;
  1177. }
  1178. early_param("xmon", early_xmon);
  1179. #endif
  1180. void cpu_die(void)
  1181. {
  1182. if (ppc_md.cpu_die)
  1183. ppc_md.cpu_die();
  1184. }