/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2007 Cavium Networks
 * Copyright (C) 2008, 2009 Wind River Systems
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/serial.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/string.h>	/* for memset */
#include <linux/tty.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/kexec.h>

#include <asm/processor.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/time.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-rst-defs.h>

/*
 * TRUE for devices having registers with little-endian byte
 * order, FALSE for registers with native-endian byte order.
 * PCI mandates little-endian, USB and SATA are configurable,
 * but we chose little-endian for these.
 */
const bool octeon_should_swizzle_table[256] = {
	[0x00] = true,	/* bootbus/CF */
	[0x1b] = true,	/* PCI mmio window */
	[0x1c] = true,	/* PCI mmio window */
	[0x1d] = true,	/* PCI mmio window */
	[0x1e] = true,	/* PCI mmio window */
	[0x68] = true,	/* OCTEON III USB */
	[0x69] = true,	/* OCTEON III USB */
	[0x6c] = true,	/* OCTEON III SATA */
	[0x6f] = true,	/* OCTEON II USB */
};
EXPORT_SYMBOL(octeon_should_swizzle_table);
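
/*
 * Note (informal, my reading of the swizzle helpers): on big-endian kernels
 * the MMIO byte-swapping code in mangle-port.h consults this table, indexing
 * it by the upper byte of the physical address (roughly bits <47:40>, the
 * per-device I/O region), so each entry above enables swizzling for one
 * device's register window.
 */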

#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
#endif

static unsigned long long max_memory = ULLONG_MAX;
static unsigned long long reserve_low_mem;

DEFINE_SEMAPHORE(octeon_bootbus_sem);
EXPORT_SYMBOL(octeon_bootbus_sem);

struct octeon_boot_descriptor *octeon_boot_desc_ptr;

struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);

#ifdef CONFIG_KEXEC
#ifdef CONFIG_SMP
/*
 * Wait until the relocation code is ready, then send the secondary
 * CPUs to spin until the kernel has been relocated.
 */
static void octeon_kexec_smp_down(void *ignored)
{
	int cpu = smp_processor_id();

	local_irq_disable();
	set_cpu_online(cpu, false);
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	asm volatile (
	"	sync					\n"
	"	synci	($0)				\n");

	relocated_kexec_smp_wait(NULL);
}
#endif

#define OCTEON_DDR0_BASE    (0x0ULL)
#define OCTEON_DDR0_SIZE    (0x010000000ULL)
#define OCTEON_DDR1_BASE    (0x410000000ULL)
#define OCTEON_DDR1_SIZE    (0x010000000ULL)
#define OCTEON_DDR2_BASE    (0x020000000ULL)
#define OCTEON_DDR2_SIZE    (0x3e0000000ULL)
#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)
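
/*
 * Layout note (informal): the three windows above add up to the 16 GB
 * maximum: the first 256 MB of DRAM at physical 0 (DDR0), a second 256 MB
 * window remapped up at 0x410000000 (DDR1), and the remainder starting at
 * 512 MB (DDR2). The 256 MB-512 MB range is skipped, which appears to be
 * the boot-bus/I/O hole. kexec_bootmem_init() below hands memory back to
 * the bootmem allocator window by window in that order.
 */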

static struct kimage *kimage_ptr;

static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
{
	int64_t addr;
	struct cvmx_bootmem_desc *bootmem_desc;

	bootmem_desc = cvmx_bootmem_get_desc();

	if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
		mem_size = OCTEON_MAX_PHY_MEM_SIZE;
		pr_err("Error: requested memory too large, truncating to maximum size\n");
	}

	bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
	bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;

	addr = (OCTEON_DDR0_BASE + reserve_low_mem + low_reserved_bytes);
	bootmem_desc->head_addr = 0;

	if (mem_size <= OCTEON_DDR0_SIZE) {
		__cvmx_bootmem_phy_free(addr,
					mem_size - reserve_low_mem -
					low_reserved_bytes, 0);
		return;
	}

	__cvmx_bootmem_phy_free(addr,
				OCTEON_DDR0_SIZE - reserve_low_mem -
				low_reserved_bytes, 0);
	mem_size -= OCTEON_DDR0_SIZE;

	if (mem_size > OCTEON_DDR1_SIZE) {
		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
		__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
					mem_size - OCTEON_DDR1_SIZE, 0);
	} else
		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
}
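
/*
 * Worked example (hypothetical command line): if a kexec'd segment starts
 * with "kexec console=ttyS0 mem=2048M", octeon_kexec_prepare() below
 * NUL-terminates at each space and ends up with argc == 2 and argv[]
 * entries pointing at the "console=ttyS0" and "mem=2048M" substrings
 * inside the loaded segment; the leading "kexec" token itself is not
 * passed as an argument.
 */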

static int octeon_kexec_prepare(struct kimage *image)
{
	int i;
	char *bootloader = "kexec";

	octeon_boot_desc_ptr->argc = 0;
	for (i = 0; i < image->nr_segments; i++) {
		if (!strncmp(bootloader, (char *)image->segment[i].buf,
				strlen(bootloader))) {
			/*
			 * convert command line string to array
			 * of parameters (as bootloader does).
			 */
			int argc = 0, offt;
			char *str = (char *)image->segment[i].buf;
			char *ptr = strchr(str, ' ');
			while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
				*ptr = '\0';
				if (ptr[1] != ' ') {
					offt = (int)(ptr - str + 1);
					octeon_boot_desc_ptr->argv[argc] =
						image->segment[i].mem + offt;
					argc++;
				}
				ptr = strchr(ptr + 1, ' ');
			}
			octeon_boot_desc_ptr->argc = argc;
			break;
		}
	}

	/*
	 * Information about segments will be needed during pre-boot memory
	 * initialization.
	 */
	kimage_ptr = image;
	return 0;
}

static void octeon_generic_shutdown(void)
{
	int i;
#ifdef CONFIG_SMP
	int cpu;
#endif
	struct cvmx_bootmem_desc *bootmem_desc;
	void *named_block_array_ptr;

	bootmem_desc = cvmx_bootmem_get_desc();
	named_block_array_ptr =
		cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);

#ifdef CONFIG_SMP
	/* disable watchdogs */
	for_each_online_cpu(cpu)
		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif
	if (kimage_ptr != kexec_crash_image) {
		memset(named_block_array_ptr,
			0x0,
			CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
			sizeof(struct cvmx_bootmem_named_block_desc));
		/*
		 * Mark all memory (except the low 0x100000 bytes) as free.
		 * This is the same thing the bootloader does.
		 */
		kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
				0x100000);
		/*
		 * Allocate all segments to avoid their corruption during boot.
		 */
		for (i = 0; i < kimage_ptr->nr_segments; i++)
			cvmx_bootmem_alloc_address(
				kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
				kimage_ptr->segment[i].mem - PAGE_SIZE,
				PAGE_SIZE);
	} else {
		/*
		 * Do not mark all memory as free. Free only named sections
		 * leaving the rest of memory unchanged.
		 */
		struct cvmx_bootmem_named_block_desc *ptr =
			(struct cvmx_bootmem_named_block_desc *)
			named_block_array_ptr;

		for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
			if (ptr[i].size)
				cvmx_bootmem_free_named(ptr[i].name);
	}
	kexec_args[2] = 1UL; /* running on octeon_main_processor */
	kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#ifdef CONFIG_SMP
	secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
	secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#endif
}

static void octeon_shutdown(void)
{
	octeon_generic_shutdown();
#ifdef CONFIG_SMP
	smp_call_function(octeon_kexec_smp_down, NULL, 0);
	smp_wmb();
	while (num_online_cpus() > 1) {
		cpu_relax();
		mdelay(1);
	}
#endif
}

static void octeon_crash_shutdown(struct pt_regs *regs)
{
	octeon_generic_shutdown();
	default_machine_crash_shutdown(regs);
}

#ifdef CONFIG_SMP
void octeon_crash_smp_send_stop(void)
{
	int cpu;

	/* disable watchdogs */
	for_each_online_cpu(cpu)
		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
}
#endif

#endif /* CONFIG_KEXEC */

#ifdef CONFIG_CAVIUM_RESERVE32
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
#endif

#ifdef CONFIG_KEXEC
/* The crashkernel cmdline parameter is parsed _after_ memory setup,
 * so we also parse it here (workaround for EHB5200) */
static uint64_t crashk_size, crashk_base;
#endif

static int octeon_uart;

extern asmlinkage void handle_int(void);

/**
 * Return non-zero if we are currently running in the Octeon simulator
 *
 * Returns non-zero if running under the simulator, zero otherwise.
 */
int octeon_is_simulation(void)
{
	return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
}
EXPORT_SYMBOL(octeon_is_simulation);

/**
 * Return true if Octeon is in PCI Host mode. This means
 * Linux can control the PCI bus.
 *
 * Returns non-zero if Octeon is in host mode.
 */
int octeon_is_pci_host(void)
{
#ifdef CONFIG_PCI
	return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
#else
	return 0;
#endif
}

/**
 * Get the clock rate of Octeon
 *
 * Returns clock rate in Hz
 */
uint64_t octeon_get_clock_rate(void)
{
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

	return sysinfo->cpu_clock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);

static u64 octeon_io_clock_rate;

u64 octeon_get_io_clock_rate(void)
{
	return octeon_io_clock_rate;
}
EXPORT_SYMBOL(octeon_get_io_clock_rate);

/**
 * Write to the LCD display connected to the bootbus. This display
 * exists on most Cavium evaluation boards. If it doesn't exist, then
 * this function doesn't do anything.
 *
 * @s:	String to write
 */
void octeon_write_lcd(const char *s)
{
	if (octeon_bootinfo->led_display_base_addr) {
		void __iomem *lcd_address =
			ioremap_nocache(octeon_bootinfo->led_display_base_addr,
					8);
		int i;

		for (i = 0; i < 8; i++, s++) {
			if (*s)
				iowrite8(*s, lcd_address + i);
			else
				iowrite8(' ', lcd_address + i);
		}
		iounmap(lcd_address);
	}
}

/**
 * Return the console uart passed by the bootloader
 *
 * Returns uart (0 or 1)
 */
int octeon_get_boot_uart(void)
{
	return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
		1 : 0;
}

/**
 * Get the coremask Linux was booted on.
 *
 * Returns core mask
 */
int octeon_get_boot_coremask(void)
{
	return octeon_boot_desc_ptr->core_mask;
}

/**
 * Check the hardware BIST results for a CPU
 */
void octeon_check_cpu_bist(void)
{
	const int coreid = cvmx_get_core_num();
	unsigned long long mask;
	unsigned long long bist_val;

	/* Check BIST results for COP0 registers */
	mask = 0x1f00000000ull;
	bist_val = read_octeon_c0_icacheerr();
	if (bist_val & mask)
		pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
		       coreid, bist_val);

	bist_val = read_octeon_c0_dcacheerr();
	if (bist_val & 1)
		pr_err("Core%d L1 Dcache parity error: CacheErr(dcache) = 0x%llx\n",
		       coreid, bist_val);

	mask = 0xfc00000000000000ull;
	bist_val = read_c0_cvmmemctl();
	if (bist_val & mask)
		pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
		       coreid, bist_val);

	write_octeon_c0_dcacheerr(0);
}

/**
 * Reboot Octeon
 *
 * @command: Command to pass to the bootloader. Currently ignored.
 */
static void octeon_restart(char *command)
{
	/* Disable all watchdogs before soft reset. They don't get cleared */
#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif

	mb();
	while (1)
		if (OCTEON_IS_OCTEON3())
			cvmx_write_csr(CVMX_RST_SOFT_RST, 1);
		else
			cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
}

/**
 * Permanently stop a core.
 *
 * @arg: Ignored.
 */
static void octeon_kill_core(void *arg)
{
	if (octeon_is_simulation())
		/* A break instruction causes the simulator to stop a core */
		asm volatile ("break" ::: "memory");

	local_irq_disable();
	/* Disable watchdog on this core. */
	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);

	/* Spin in a low power mode. */
	while (true)
		asm volatile ("wait" ::: "memory");
}

/**
 * Halt the system
 */
static void octeon_halt(void)
{
	smp_call_function(octeon_kill_core, NULL, 0);

	switch (octeon_bootinfo->board_type) {
	case CVMX_BOARD_TYPE_NAO38:
		/* Driving a 1 to GPIO 12 shuts off this board */
		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
		cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
		break;
	default:
		octeon_write_lcd("PowerOff");
		break;
	}

	octeon_kill_core(NULL);
}

static char __read_mostly octeon_system_type[80];

static void __init init_octeon_system_type(void)
{
	char const *board_type;

	board_type = cvmx_board_type_to_string(octeon_bootinfo->board_type);
	if (board_type == NULL) {
		struct device_node *root;
		int ret;

		root = of_find_node_by_path("/");
		ret = of_property_read_string(root, "model", &board_type);
		of_node_put(root);
		if (ret)
			board_type = "Unsupported Board";
	}

	snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
		 board_type, octeon_model_get_string(read_c0_prid()));
}

/**
 * Return a string representing the system type
 *
 * Returns the system type string.
 */
const char *octeon_board_type_string(void)
{
	return octeon_system_type;
}

const char *get_system_type(void)
	__attribute__ ((alias("octeon_board_type_string")));

void octeon_user_io_init(void)
{
	union octeon_cvmemctl cvmmemctl;

	/* Get the current settings for CP0_CVMMEMCTL_REG */
	cvmmemctl.u64 = read_c0_cvmmemctl();
	/* R/W If set, marked write-buffer entries time out the same
	 * as other entries; if clear, marked write-buffer entries
	 * use the maximum timeout. */
	cvmmemctl.s.dismarkwblongto = 1;
	/* R/W If set, a merged store does not clear the write-buffer
	 * entry timeout state. */
	cvmmemctl.s.dismrgclrwbto = 0;

	/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
	 * word location for an IOBDMA. The other 8 bits come from the
	 * SCRADDR field of the IOBDMA. */
	cvmmemctl.s.iobdmascrmsb = 0;
	/* R/W If set, SYNCWS and SYNCS only order marked stores; if
	 * clear, SYNCWS and SYNCS only order unmarked
	 * stores. SYNCWSMARKED has no effect when DISSYNCWS is
	 * set. */
	cvmmemctl.s.syncwsmarked = 0;
	/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
	cvmmemctl.s.dissyncws = 0;
	/* R/W If set, no stall happens on write buffer full. */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
		cvmmemctl.s.diswbfst = 1;
	else
		cvmmemctl.s.diswbfst = 0;
	/* R/W If set (and SX set), supervisor-level loads/stores can
	 * use XKPHYS addresses with <48>==0 */
	cvmmemctl.s.xkmemenas = 0;

	/* R/W If set (and UX set), user-level loads/stores can use
	 * XKPHYS addresses with VA<48>==0 */
	cvmmemctl.s.xkmemenau = 0;

	/* R/W If set (and SX set), supervisor-level loads/stores can
	 * use XKPHYS addresses with VA<48>==1 */
	cvmmemctl.s.xkioenas = 0;

	/* R/W If set (and UX set), user-level loads/stores can use
	 * XKPHYS addresses with VA<48>==1 */
	cvmmemctl.s.xkioenau = 0;

	/* R/W If set, all stores act as SYNCW (NOMERGE must be set
	 * when this is set) RW, reset to 0. */
	cvmmemctl.s.allsyncw = 0;

	/* R/W If set, no stores merge, and all stores reach the
	 * coherent bus in order. */
	cvmmemctl.s.nomerge = 0;
	/* R/W Selects the bit in the counter used for DID time-outs:
	 * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. The actual time-out
	 * is between 1x and 2x this interval. For example, with
	 * DIDTTO=3, the expiration interval is between 16K and 32K. */
	cvmmemctl.s.didtto = 0;
	/* R/W If set, the (mem) CSR clock never turns off. */
	cvmmemctl.s.csrckalwys = 0;
	/* R/W If set, mclk never turns off. */
	cvmmemctl.s.mclkalwys = 0;
	/* R/W Selects the bit in the counter used for write buffer
	 * flush time-outs: (WBFLT + 11) is the bit position in an
	 * internal counter used to determine expiration. The write
	 * buffer expires between 1x and 2x this interval. For
	 * example, with WBFLT = 0, a write buffer expires between 2K
	 * and 4K cycles after the write buffer entry is allocated. */
	cvmmemctl.s.wbfltime = 0;

	/* R/W If set, do not put Istream in the L2 cache. */
	cvmmemctl.s.istrnol2 = 0;

	/*
	 * R/W The write buffer threshold. As per erratum Core-14752
	 * for CN63XX, a sc/scd might fail if the write buffer is
	 * full. Lowering WBTHRESH greatly lowers the chances of the
	 * write buffer ever being full and triggering the erratum.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
		cvmmemctl.s.wbthresh = 4;
	else
		cvmmemctl.s.wbthresh = 10;

	/* R/W If set, CVMSEG is available for loads/stores in
	 * kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
	cvmmemctl.s.cvmsegenak = 1;
#else
	cvmmemctl.s.cvmsegenak = 0;
#endif
	/* R/W If set, CVMSEG is available for loads/stores in
	 * supervisor mode. */
	cvmmemctl.s.cvmsegenas = 0;
	/* R/W If set, CVMSEG is available for loads/stores in user
	 * mode. */
	cvmmemctl.s.cvmsegenau = 0;

	write_c0_cvmmemctl(cvmmemctl.u64);

	/* Setup of CVMSEG is done in kernel-entry-init.h */
	if (smp_processor_id() == 0)
		pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);

	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
		union cvmx_iob_fau_timeout fau_timeout;

		/* Set a default for the hardware timeouts */
		fau_timeout.u64 = 0;
		fau_timeout.s.tout_val = 0xfff;
		/* Disable tagwait FAU timeout */
		fau_timeout.s.tout_enb = 0;
		cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
	}

	if ((!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
	     !OCTEON_IS_MODEL(OCTEON_CN7XXX)) ||
	    OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		union cvmx_pow_nw_tim nm_tim;

		nm_tim.u64 = 0;
		/* 4096 cycles */
		nm_tim.s.nw_tim = 3;
		cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
	}

	write_octeon_c0_icacheerr(0);
	write_c0_derraddr1(0);
}

/**
 * Early entry point for arch setup
 */
void __init prom_init(void)
{
	struct cvmx_sysinfo *sysinfo;
	const char *arg;
	char *p;
	int i;
	u64 t;
	int argc;
#ifdef CONFIG_CAVIUM_RESERVE32
	int64_t addr = -1;
#endif
	/*
	 * The bootloader passes a pointer to the boot descriptor in
	 * $a3, this is available as fw_arg3.
	 */
	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
	octeon_bootinfo =
		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
	cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));

	sysinfo = cvmx_sysinfo_get();
	memset(sysinfo, 0, sizeof(*sysinfo));
	sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
	sysinfo->phy_mem_desc_addr = (u64)phys_to_virt(octeon_bootinfo->phy_mem_desc_addr);

	if ((octeon_bootinfo->major_version > 1) ||
	    (octeon_bootinfo->major_version == 1 &&
	     octeon_bootinfo->minor_version >= 4))
		cvmx_coremask_copy(&sysinfo->core_mask,
				   &octeon_bootinfo->ext_core_mask);
	else
		cvmx_coremask_set64(&sysinfo->core_mask,
				    octeon_bootinfo->core_mask);

	/* Some broken U-Boot versions pass garbage in the upper bits; clear them out. */
	if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
		for (i = 512; i < 1024; i++)
			cvmx_coremask_clear_core(&sysinfo->core_mask, i);

	sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
	sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
	sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
	sysinfo->board_type = octeon_bootinfo->board_type;
	sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
	sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
	memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
	       sizeof(sysinfo->mac_addr_base));
	sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
	memcpy(sysinfo->board_serial_number,
	       octeon_bootinfo->board_serial_number,
	       sizeof(sysinfo->board_serial_number));
	sysinfo->compact_flash_common_base_addr =
		octeon_bootinfo->compact_flash_common_base_addr;
	sysinfo->compact_flash_attribute_base_addr =
		octeon_bootinfo->compact_flash_attribute_base_addr;
	sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
	sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
	sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;

	if (OCTEON_IS_OCTEON2()) {
		/* I/O clock runs at a different rate than the CPU. */
		union cvmx_mio_rst_boot rst_boot;

		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
	} else if (OCTEON_IS_OCTEON3()) {
		/* I/O clock runs at a different rate than the CPU. */
		union cvmx_rst_boot rst_boot;

		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
	} else {
		octeon_io_clock_rate = sysinfo->cpu_clock_hz;
	}
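
	/*
	 * Illustrative arithmetic: with the 50 MHz reference used above, a
	 * PNR multiplier of, say, 12 would give a 600 MHz I/O (SCLK) clock;
	 * older chips simply reuse the CPU clock.
	 */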

	t = read_c0_cvmctl();
	if ((t & (1ull << 27)) == 0) {
		/*
		 * Setup the multiplier save/restore code if
		 * CvmCtl[NOMUL] clear.
		 */
		void *save;
		void *save_end;
		void *restore;
		void *restore_end;
		int save_len;
		int restore_len;
		int save_max = (char *)octeon_mult_save_end -
			(char *)octeon_mult_save;
		int restore_max = (char *)octeon_mult_restore_end -
			(char *)octeon_mult_restore;

		if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
			save = octeon_mult_save3;
			save_end = octeon_mult_save3_end;
			restore = octeon_mult_restore3;
			restore_end = octeon_mult_restore3_end;
		} else {
			save = octeon_mult_save2;
			save_end = octeon_mult_save2_end;
			restore = octeon_mult_restore2;
			restore_end = octeon_mult_restore2_end;
		}
		save_len = (char *)save_end - (char *)save;
		restore_len = (char *)restore_end - (char *)restore;
		if (!WARN_ON(save_len > save_max ||
				restore_len > restore_max)) {
			memcpy(octeon_mult_save, save, save_len);
			memcpy(octeon_mult_restore, restore, restore_len);
		}
	}

	/*
	 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
	 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
	 */
	if (!octeon_is_simulation() &&
	    octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
		cvmx_write_csr(CVMX_LED_EN, 0);
		cvmx_write_csr(CVMX_LED_PRT, 0);
		cvmx_write_csr(CVMX_LED_DBG, 0);
		cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
		cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
		cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
		cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
		cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
		cvmx_write_csr(CVMX_LED_EN, 1);
	}

#ifdef CONFIG_CAVIUM_RESERVE32
	/*
	 * We need to temporarily allocate all memory in the reserve32
	 * region. This makes sure the kernel doesn't allocate this
	 * memory when it is getting memory from the
	 * bootloader. Later, after the memory allocations are
	 * complete, the reserve32 will be freed.
	 *
	 * Allocate memory for RESERVED32 aligned on 2MB boundary. This
	 * is in case we later use hugetlb entries with it.
	 */
	addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
						  0, 0, 2 << 20,
						  "CAVIUM_RESERVE32", 0);
	if (addr < 0)
		pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
	else
		octeon_reserve32_memory = addr;
#endif

#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
	if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
		pr_info("Skipping L2 locking due to reduced L2 cache size\n");
	} else {
		uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
		/* TLB refill */
		cvmx_l2c_lock_mem_region(ebase, 0x100);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
		/* General exception */
		cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
		/* Interrupt handler */
		cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
		cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
		cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
		cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
#endif
	}
#endif

	octeon_check_cpu_bist();

	octeon_uart = octeon_get_boot_uart();

#ifdef CONFIG_SMP
	octeon_write_lcd("LinuxSMP");
#else
	octeon_write_lcd("Linux");
#endif

	octeon_setup_delays();

	/*
	 * BIST should always be enabled when doing a soft reset. L2
	 * cache locking, for instance, is not cleared unless BIST is
	 * enabled. Unfortunately, due to chip erratum G-200 for
	 * CN38XX and CN31XX, BIST must be disabled on these parts.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
	    OCTEON_IS_MODEL(OCTEON_CN31XX))
		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
	else
		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);

	/* Default to 64MB in the simulator to speed things up */
	if (octeon_is_simulation())
		max_memory = 64ull << 20;
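
	/*
	 * Command-line memory syntax handled below (as implemented here):
	 * "mem=SIZE" caps usable DRAM at SIZE, and an optional "@OFFSET"
	 * suffix additionally reserves the first OFFSET bytes of low memory,
	 * e.g. "mem=3G@1M" (hypothetical values).
	 */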
	arg = strstr(arcs_cmdline, "mem=");
	if (arg) {
		max_memory = memparse(arg + 4, &p);
		if (max_memory == 0)
			max_memory = 32ull << 30;
		if (*p == '@')
			reserve_low_mem = memparse(p + 1, &p);
	}

	arcs_cmdline[0] = 0;
	argc = octeon_boot_desc_ptr->argc;
	for (i = 0; i < argc; i++) {
		const char *arg =
			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);

		if ((strncmp(arg, "MEM=", 4) == 0) ||
		    (strncmp(arg, "mem=", 4) == 0)) {
			max_memory = memparse(arg + 4, &p);
			if (max_memory == 0)
				max_memory = 32ull << 30;
			if (*p == '@')
				reserve_low_mem = memparse(p + 1, &p);
#ifdef CONFIG_KEXEC
		} else if (strncmp(arg, "crashkernel=", 12) == 0) {
			crashk_size = memparse(arg+12, &p);
			if (*p == '@')
				crashk_base = memparse(p+1, &p);
			strcat(arcs_cmdline, " ");
			strcat(arcs_cmdline, arg);
			/*
			 * To do: switch parsing to new style, something like:
			 * parse_crashkernel(arg, sysinfo->system_dram_size,
			 *		     &crashk_size, &crashk_base);
			 */
#endif
		} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
			   sizeof(arcs_cmdline) - 1) {
			strcat(arcs_cmdline, " ");
			strcat(arcs_cmdline, arg);
		}
	}

	if (strstr(arcs_cmdline, "console=") == NULL) {
		if (octeon_uart == 1)
			strcat(arcs_cmdline, " console=ttyS1,115200");
		else
			strcat(arcs_cmdline, " console=ttyS0,115200");
	}

	mips_hpt_frequency = octeon_get_clock_rate();

	octeon_init_cvmcount();

	_machine_restart = octeon_restart;
	_machine_halt = octeon_halt;

#ifdef CONFIG_KEXEC
	_machine_kexec_shutdown = octeon_shutdown;
	_machine_crash_shutdown = octeon_crash_shutdown;
	_machine_kexec_prepare = octeon_kexec_prepare;
#ifdef CONFIG_SMP
	_crash_smp_send_stop = octeon_crash_smp_send_stop;
#endif
#endif

	octeon_user_io_init();
	octeon_setup_smp();
}

/* Exclude a single page from the regions obtained in plat_mem_setup. */
#ifndef CONFIG_CRASH_DUMP
static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
{
	if (addr > *mem && addr < *mem + *size) {
		u64 inc = addr - *mem;

		add_memory_region(*mem, inc, BOOT_MEM_RAM);
		*mem += inc;
		*size -= inc;
	}

	if (addr == *mem && *size > PAGE_SIZE) {
		*mem += PAGE_SIZE;
		*size -= PAGE_SIZE;
	}
}
#endif /* CONFIG_CRASH_DUMP */

void __init fw_init_cmdline(void)
{
	int i;

	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
	for (i = 0; i < octeon_boot_desc_ptr->argc; i++) {
		const char *arg =
			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);

		if (strlen(arcs_cmdline) + strlen(arg) + 1 <
		    sizeof(arcs_cmdline) - 1) {
			strcat(arcs_cmdline, " ");
			strcat(arcs_cmdline, arg);
		}
	}
}

void __init *plat_get_fdt(void)
{
	octeon_bootinfo =
		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
	return phys_to_virt(octeon_bootinfo->fdt_addr);
}

void __init plat_mem_setup(void)
{
	uint64_t mem_alloc_size;
	uint64_t total;
	uint64_t crashk_end;
#ifndef CONFIG_CRASH_DUMP
	int64_t memory;
	uint64_t kernel_start;
	uint64_t kernel_size;
#endif

	total = 0;
	crashk_end = 0;

	/*
	 * The MIPS memory init uses the first memory location for
	 * some memory vectors. When SPARSEMEM is in use, it doesn't
	 * verify that the size is big enough for the final
	 * vectors. Making the smallest chunk 4MB seems to be enough
	 * to consistently work.
	 */
	mem_alloc_size = 4 << 20;
	if (mem_alloc_size > max_memory)
		mem_alloc_size = max_memory;

	/* Crashkernel ignores bootmem list. It relies on mem=X@Y option */
#ifdef CONFIG_CRASH_DUMP
	add_memory_region(reserve_low_mem, max_memory, BOOT_MEM_RAM);
	total += max_memory;
#else
#ifdef CONFIG_KEXEC
	if (crashk_size > 0) {
		add_memory_region(crashk_base, crashk_size, BOOT_MEM_RAM);
		crashk_end = crashk_base + crashk_size;
	}
#endif
	/*
	 * When allocating memory, we want incrementing addresses from
	 * bootmem_alloc so the code in add_memory_region can merge
	 * regions next to each other.
	 */
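
	/*
	 * Carve-out sketch (assuming the usual cvmx_bootmem_phy_alloc(size,
	 * min_addr, max_addr, align, flags) argument order): each pass below
	 * asks the bootmem allocator for mem_alloc_size bytes placed above
	 * the kernel image (_end), 1 MB aligned, and registers the result
	 * with add_memory_region() until max_memory has been claimed or the
	 * boot memory map fills up.
	 */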
	cvmx_bootmem_lock();
	while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
		&& (total < max_memory)) {
		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
						__pa_symbol(&_end), -1,
						0x100000,
						CVMX_BOOTMEM_FLAG_NO_LOCKING);
		if (memory >= 0) {
			u64 size = mem_alloc_size;
#ifdef CONFIG_KEXEC
			uint64_t end;
#endif

			/*
			 * exclude a page at the beginning and end of
			 * the 256MB PCIe 'hole' so the kernel will not
			 * try to allocate multi-page buffers that
			 * span the discontinuity.
			 */
			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
					    &memory, &size);
			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
					    CVMX_PCIE_BAR1_PHYS_SIZE,
					    &memory, &size);
#ifdef CONFIG_KEXEC
			end = memory + mem_alloc_size;

			/*
			 * This function automatically merges address regions
			 * next to each other if they are received in
			 * incrementing order
			 */
			if (memory < crashk_base && end > crashk_end) {
				/* The crash region is fully inside this block */
				add_memory_region(memory,
						  crashk_base - memory,
						  BOOT_MEM_RAM);
				total += crashk_base - memory;
				add_memory_region(crashk_end,
						  end - crashk_end,
						  BOOT_MEM_RAM);
				total += end - crashk_end;
				continue;
			}

			if (memory >= crashk_base && end <= crashk_end)
				/*
				 * Entire memory region is within the new
				 * kernel's memory, ignore it.
				 */
				continue;

			if (memory > crashk_base && memory < crashk_end &&
			    end > crashk_end) {
				/*
				 * The start of this block overlaps the crash
				 * region; skip past the overlap.
				 */
				mem_alloc_size -= crashk_end - memory;
				memory = crashk_end;
			} else if (memory < crashk_base && end > crashk_base &&
				   end < crashk_end)
				/*
				 * The end of this block overlaps the crash
				 * region; chop off the end.
				 */
				mem_alloc_size -= end - crashk_base;
#endif
			add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
			total += mem_alloc_size;
			/* Recovering mem_alloc_size */
			mem_alloc_size = 4 << 20;
		} else {
			break;
		}
	}
	cvmx_bootmem_unlock();
	/* Add the memory region for the kernel. */
	kernel_start = (unsigned long) _text;
	kernel_size = _end - _text;

	/* Adjust for physical offset. */
	kernel_start &= ~0xffffffff80000000ULL;
	add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM);
#endif /* CONFIG_CRASH_DUMP */

#ifdef CONFIG_CAVIUM_RESERVE32
	/*
	 * Now that we've allocated the kernel memory it is safe to
	 * free the reserved region. We free it here so that builtin
	 * drivers can use the memory.
	 */
	if (octeon_reserve32_memory)
		cvmx_bootmem_free_named("CAVIUM_RESERVE32");
#endif /* CONFIG_CAVIUM_RESERVE32 */

	if (total == 0)
		panic("Unable to allocate memory from cvmx_bootmem_phy_alloc");
}

/*
 * Emit one character to the boot UART. Exported for use by the
 * watchdog timer.
 */
int prom_putchar(char c)
{
	uint64_t lsrval;

	/* Spin until there is room (LSR bit 5, transmitter holding register empty) */
	do {
		lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
	} while ((lsrval & 0x20) == 0);

	/* Write the byte */
	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
	return 1;
}
EXPORT_SYMBOL(prom_putchar);
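
/*
 * Sketch of the Core-14449 probe below (my reading of the asm, not an
 * authoritative description): "bal 1f" leaves the address of label 1 in $31,
 * so the lw at -12($31) reads back the instruction emitted three words
 * earlier, which should be the pref generated by prefetch(foo). Major
 * opcode 0x33 is PREF, and the hint field in bits <20:16> is expected to be
 * 28, the hint the CN63XXP1 assembler workaround (CONFIG_CAVIUM_CN63XXP1)
 * substitutes for the normal prefetch hints.
 */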
void __init prom_free_prom_memory(void)
{
	if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) {
		/* Check for presence of Core-14449 fix.  */
		u32 insn;
		u32 *foo;

		foo = &insn;

		asm volatile("# before" : : : "memory");
		prefetch(foo);
		asm volatile(
			".set push\n\t"
			".set noreorder\n\t"
			"bal 1f\n\t"
			"nop\n"
			"1:\tlw %0,-12($31)\n\t"
			".set pop\n\t"
			: "=r" (insn) : : "$31", "memory");

		if ((insn >> 26) != 0x33)
			panic("No PREF instruction at Core-14449 probe point.");

		if (((insn >> 16) & 0x1f) != 28)
			panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
			      insn);
	}
}

void __init octeon_fill_mac_addresses(void);
int octeon_prune_device_tree(void);

extern const char __appended_dtb;
extern const char __dtb_octeon_3xxx_begin;
extern const char __dtb_octeon_68xx_begin;

void __init device_tree_init(void)
{
	const void *fdt;
	bool do_prune;
	bool fill_mac;

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
	if (!fdt_check_header(&__appended_dtb)) {
		fdt = &__appended_dtb;
		do_prune = false;
		fill_mac = true;
		pr_info("Using appended Device Tree.\n");
	} else
#endif
	if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
		fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
		if (fdt_check_header(fdt))
			panic("Corrupt Device Tree passed to kernel.");
		do_prune = false;
		fill_mac = false;
		pr_info("Using passed Device Tree.\n");
	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		fdt = &__dtb_octeon_68xx_begin;
		do_prune = true;
		fill_mac = true;
	} else {
		fdt = &__dtb_octeon_3xxx_begin;
		do_prune = true;
		fill_mac = true;
	}

	initial_boot_params = (void *)fdt;

	if (do_prune) {
		octeon_prune_device_tree();
		pr_info("Using internal Device Tree.\n");
	}
	if (fill_mac)
		octeon_fill_mac_addresses();
	unflatten_and_copy_device_tree();
	init_octeon_system_type();
}

static int __initdata disable_octeon_edac_p;

static int __init disable_octeon_edac(char *str)
{
	disable_octeon_edac_p = 1;
	return 0;
}
early_param("disable_octeon_edac", disable_octeon_edac);

static char *edac_device_names[] = {
	"octeon_l2c_edac",
	"octeon_pc_edac",
};

static int __init edac_devinit(void)
{
	struct platform_device *dev;
	int i, err = 0;
	int num_lmc;
	char *name;

	if (disable_octeon_edac_p)
		return 0;

	for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
		name = edac_device_names[i];
		dev = platform_device_register_simple(name, -1, NULL, 0);
		if (IS_ERR(dev)) {
			pr_err("Registration of %s failed!\n", name);
			err = PTR_ERR(dev);
		}
	}

	num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
		(OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
	for (i = 0; i < num_lmc; i++) {
		dev = platform_device_register_simple("octeon_lmc_edac",
						      i, NULL, 0);
		if (IS_ERR(dev)) {
			pr_err("Registration of octeon_lmc_edac %d failed!\n", i);
			err = PTR_ERR(dev);
		}
	}

	return err;
}
device_initcall(edac_devinit);

static void __initdata *octeon_dummy_iospace;

static int __init octeon_no_pci_init(void)
{
	/*
	 * Initially assume there is no PCI. The PCI/PCIe platform code will
	 * later re-initialize these to correct values if they are present.
	 */
	octeon_dummy_iospace = vzalloc(IO_SPACE_LIMIT);
	set_io_port_base((unsigned long)octeon_dummy_iospace);
	ioport_resource.start = MAX_RESOURCE;
	ioport_resource.end = 0;
	return 0;
}
core_initcall(octeon_no_pci_init);

static int __init octeon_no_pci_release(void)
{
	/*
	 * Release the allocated memory if a real IO space is there.
	 */
	if ((unsigned long)octeon_dummy_iospace != mips_io_port_base)
		vfree(octeon_dummy_iospace);
	return 0;
}
late_initcall(octeon_no_pci_release);