setup.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/cpu.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/proc-fns.h>
#include <asm/cache_info.h>
#include <asm/elf.h>
#include <nds32_intrinsic.h>
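
/*
 * HWCAP_* feature bits reported to user space through elf_hwcap;
 * the bit order must match the hwcap_str[] table below.
 */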
#define HWCAP_MFUSR_PC		0x000001
#define HWCAP_EXT		0x000002
#define HWCAP_EXT2		0x000004
#define HWCAP_FPU		0x000008
#define HWCAP_AUDIO		0x000010
#define HWCAP_BASE16		0x000020
#define HWCAP_STRING		0x000040
#define HWCAP_REDUCED_REGS	0x000080
#define HWCAP_VIDEO		0x000100
#define HWCAP_ENCRYPT		0x000200
#define HWCAP_EDM		0x000400
#define HWCAP_LMDMA		0x000800
#define HWCAP_PFM		0x001000
#define HWCAP_HSMP		0x002000
#define HWCAP_TRACE		0x004000
#define HWCAP_DIV		0x008000
#define HWCAP_MAC		0x010000
#define HWCAP_L2C		0x020000
#define HWCAP_FPU_DP		0x040000
#define HWCAP_V2		0x080000
#define HWCAP_DX_REGS		0x100000

unsigned long cpu_id, cpu_rev, cpu_cfgid;
char cpu_series;
char *endianness = NULL;

unsigned int __atags_pointer __initdata;

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

/*
 * The following string table must stay in sync with the HWCAP_xx bitmask
 * defined in <asm/procinfo.h>.
 */
static const char *hwcap_str[] = {
	"mfusr_pc",
	"perf1",
	"perf2",
	"fpu",
	"audio",
	"16b",
	"string",
	"reduced_regs",
	"video",
	"encrypt",
	"edm",
	"lmdma",
	"pfm",
	"hsmp",
	"trace",
	"div",
	"mac",
	"l2c",
	"dx_regs",
	"v2",
	NULL,
};

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define WRITE_METHOD "write through"
#else
#define WRITE_METHOD "write back"
#endif

struct cache_info L1_cache_info[2];
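
/*
 * Print the detected CPU feature string and the L1 I/D cache geometry
 * (size in KB, sets, ways, line size), and record the cache parameters
 * in L1_cache_info[].  With CONFIG_CPU_CACHE_ALIASING, also compute the
 * cache aliasing number and mask derived from PAGE_SIZE.
 */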
static void __init dump_cpu_info(int cpu)
{
	int i, p = 0;
	char str[sizeof(hwcap_str) + 16];

	for (i = 0; hwcap_str[i]; i++) {
		if (elf_hwcap & (1 << i)) {
			sprintf(str + p, "%s ", hwcap_str[i]);
			p += strlen(hwcap_str[i]) + 1;
		}
	}

	pr_info("CPU%d Features: %s\n", cpu, str);

	L1_cache_info[ICACHE].ways = CACHE_WAY(ICACHE);
	L1_cache_info[ICACHE].line_size = CACHE_LINE_SIZE(ICACHE);
	L1_cache_info[ICACHE].sets = CACHE_SET(ICACHE);
	L1_cache_info[ICACHE].size =
	    L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].line_size *
	    L1_cache_info[ICACHE].sets / 1024;
	pr_info("L1I:%dKB/%dS/%dW/%dB\n", L1_cache_info[ICACHE].size,
		L1_cache_info[ICACHE].sets, L1_cache_info[ICACHE].ways,
		L1_cache_info[ICACHE].line_size);

	L1_cache_info[DCACHE].ways = CACHE_WAY(DCACHE);
	L1_cache_info[DCACHE].line_size = CACHE_LINE_SIZE(DCACHE);
	L1_cache_info[DCACHE].sets = CACHE_SET(DCACHE);
	L1_cache_info[DCACHE].size =
	    L1_cache_info[DCACHE].ways * L1_cache_info[DCACHE].line_size *
	    L1_cache_info[DCACHE].sets / 1024;
	pr_info("L1D:%dKB/%dS/%dW/%dB\n", L1_cache_info[DCACHE].size,
		L1_cache_info[DCACHE].sets, L1_cache_info[DCACHE].ways,
		L1_cache_info[DCACHE].line_size);
	pr_info("L1 D-Cache is %s\n", WRITE_METHOD);

	if (L1_cache_info[DCACHE].size != L1_CACHE_BYTES)
		pr_crit
		    ("The cache line size(%d) of this processor is not the same as L1_CACHE_BYTES(%d).\n",
		     L1_cache_info[DCACHE].size, L1_CACHE_BYTES);
#ifdef CONFIG_CPU_CACHE_ALIASING
	{
		int aliasing_num;

		aliasing_num =
		    L1_cache_info[ICACHE].size * 1024 / PAGE_SIZE /
		    L1_cache_info[ICACHE].ways;
		L1_cache_info[ICACHE].aliasing_num = aliasing_num;
		L1_cache_info[ICACHE].aliasing_mask =
		    (aliasing_num - 1) << PAGE_SHIFT;

		aliasing_num =
		    L1_cache_info[DCACHE].size * 1024 / PAGE_SIZE /
		    L1_cache_info[DCACHE].ways;
		L1_cache_info[DCACHE].aliasing_num = aliasing_num;
		L1_cache_info[DCACHE].aliasing_mask =
		    (aliasing_num - 1) << PAGE_SHIFT;
	}
#endif
}
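
/*
 * Probe the CPU: read CPU_VER to get the CPU id, series letter, revision
 * and configuration id, derive the elf_hwcap feature bits from MSC_CFG,
 * pick the endianness string from MMU_CFG, enable the I/D caches unless
 * disabled by Kconfig, and finally print the results via dump_cpu_info().
 */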
static void __init setup_cpuinfo(void)
{
	unsigned long tmp = 0, cpu_name;

	cpu_dcache_inval_all();
	cpu_icache_inval_all();
	__nds32__isb();

	cpu_id = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCPUID) >> CPU_VER_offCPUID;
	cpu_name = ((cpu_id) & 0xf0) >> 4;
	cpu_series = cpu_name ? cpu_name - 10 + 'A' : 'N';
	cpu_id = cpu_id & 0xf;
	cpu_rev = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskREV) >> CPU_VER_offREV;
	cpu_cfgid = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCFGID) >> CPU_VER_offCFGID;

	pr_info("CPU:%c%ld, CPU_VER 0x%08x(id %lu, rev %lu, cfg %lu)\n",
		cpu_series, cpu_id, __nds32__mfsr(NDS32_SR_CPU_VER),
		cpu_id, cpu_rev, cpu_cfgid);

	elf_hwcap |= HWCAP_MFUSR_PC;

	if (((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskBASEV) >> MSC_CFG_offBASEV) == 0) {
		if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskDIV)
			elf_hwcap |= HWCAP_DIV;

		if ((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskMAC)
		    || (cpu_id == 12 && cpu_rev < 4))
			elf_hwcap |= HWCAP_MAC;
	} else {
		elf_hwcap |= HWCAP_V2;
		elf_hwcap |= HWCAP_DIV;
		elf_hwcap |= HWCAP_MAC;
	}

	if (cpu_cfgid & 0x0001)
		elf_hwcap |= HWCAP_EXT;

	if (cpu_cfgid & 0x0002)
		elf_hwcap |= HWCAP_BASE16;

	if (cpu_cfgid & 0x0004)
		elf_hwcap |= HWCAP_EXT2;

	if (cpu_cfgid & 0x0008)
		elf_hwcap |= HWCAP_FPU;

	if (cpu_cfgid & 0x0010)
		elf_hwcap |= HWCAP_STRING;

	if (__nds32__mfsr(NDS32_SR_MMU_CFG) & MMU_CFG_mskDE)
		endianness = "MSB";
	else
		endianness = "LSB";

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskEDM)
		elf_hwcap |= HWCAP_EDM;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskLMDMA)
		elf_hwcap |= HWCAP_LMDMA;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskPFM)
		elf_hwcap |= HWCAP_PFM;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskHSMP)
		elf_hwcap |= HWCAP_HSMP;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskTRACE)
		elf_hwcap |= HWCAP_TRACE;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskAUDIO)
		elf_hwcap |= HWCAP_AUDIO;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskL2C)
		elf_hwcap |= HWCAP_L2C;

	tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
	if (!IS_ENABLED(CONFIG_CPU_DCACHE_DISABLE))
		tmp |= CACHE_CTL_mskDC_EN;

	if (!IS_ENABLED(CONFIG_CPU_ICACHE_DISABLE))
		tmp |= CACHE_CTL_mskIC_EN;

	__nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);

	dump_cpu_info(smp_processor_id());
}
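
/*
 * Set up the boot memory: walk the memblock regions to find RAM, compute
 * the PFN limits (max_pfn, max_low_pfn, min_low_pfn, high_memory), reserve
 * the pages occupied by the kernel image and the flattened device tree,
 * and dump the resulting memblock layout.
 */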
static void __init setup_memory(void)
{
	unsigned long ram_start_pfn;
	unsigned long free_ram_start_pfn;
	phys_addr_t memory_start, memory_end;
	struct memblock_region *region;

	memory_end = memory_start = 0;

	/* Find the main memory where the kernel lives */
	for_each_memblock(memory, region) {
		memory_start = region->base;
		memory_end = region->base + region->size;
		pr_info("%s: Memory: 0x%x-0x%x\n", __func__,
			memory_start, memory_end);
	}

	if (!memory_end) {
		panic("No memory!");
	}

	ram_start_pfn = PFN_UP(memblock_start_of_DRAM());
	/* free_ram_start_pfn is the first page after the kernel */
	free_ram_start_pfn = PFN_UP(__pa(&_end));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	/* the check below may update max_pfn */
	if (max_pfn - ram_start_pfn <= MAXMEM_PFN)
		max_low_pfn = max_pfn;
	else {
		max_low_pfn = MAXMEM_PFN + ram_start_pfn;
		if (!IS_ENABLED(CONFIG_HIGHMEM))
			max_pfn = MAXMEM_PFN + ram_start_pfn;
	}
	/* high_memory is related to VMALLOC */
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
	min_low_pfn = free_ram_start_pfn;

	/*
	 * Initialize the boot-time allocator (with low memory only).
	 *
	 * This makes the memory from the end of the kernel to the end of
	 * RAM usable.
	 */
	memblock_set_bottom_up(true);
	memblock_reserve(PFN_PHYS(ram_start_pfn),
			 PFN_PHYS(free_ram_start_pfn - ram_start_pfn));

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	memblock_dump_all();
}
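
/*
 * Arch-specific boot-time setup: parse the device tree (from the pointer
 * handed over by the boot loader in __atags_pointer, or the built-in
 * __dtb_start blob), probe the CPU, record the kernel image layout in
 * init_mm, initialize memory and paging, flush the TLB, parse early
 * parameters, unflatten the device tree, and install the early trap
 * handlers.
 */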
void __init setup_arch(char **cmdline_p)
{
	early_init_devtree(__atags_pointer ?
			   phys_to_virt(__atags_pointer) : __dtb_start);

	setup_cpuinfo();

	init_mm.start_code = (unsigned long)&_stext;
	init_mm.end_code = (unsigned long)&_etext;
	init_mm.end_data = (unsigned long)&_edata;
	init_mm.brk = (unsigned long)&_end;

	/* setup bootmem allocator */
	setup_memory();

	/* paging_init() sets up the MMU and marks all pages as reserved */
	paging_init();

	/* invalidate all TLB entries because new mappings have been created */
	__nds32__tlbop_flua();

	/* use the generic early parameter parser */
	parse_early_param();

	unflatten_and_copy_device_tree();

	if (IS_ENABLED(CONFIG_VT)) {
		if (IS_ENABLED(CONFIG_DUMMY_CONSOLE))
			conswitchp = &dummy_con;
	}

	*cmdline_p = boot_command_line;
	early_trap_init();
}
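
/*
 * /proc/cpuinfo support: c_show() prints the processor id, L1 cache
 * geometry, BogoMIPS and feature strings; c_start()/c_next()/c_stop()
 * iterate over the single record this file exposes.
 */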
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %c%ld (id %lu, rev %lu, cfg %lu)\n",
		   cpu_series, cpu_id, cpu_id, cpu_rev, cpu_cfgid);

	seq_printf(m, "L1I\t\t: %luKB/%luS/%luW/%luB\n",
		   CACHE_SET(ICACHE) * CACHE_WAY(ICACHE) *
		   CACHE_LINE_SIZE(ICACHE) / 1024, CACHE_SET(ICACHE),
		   CACHE_WAY(ICACHE), CACHE_LINE_SIZE(ICACHE));

	seq_printf(m, "L1D\t\t: %luKB/%luS/%luW/%luB\n",
		   CACHE_SET(DCACHE) * CACHE_WAY(DCACHE) *
		   CACHE_LINE_SIZE(DCACHE) / 1024, CACHE_SET(DCACHE),
		   CACHE_WAY(DCACHE), CACHE_LINE_SIZE(DCACHE));

	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100);

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_puts(m, "\n\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};