init_64.c

  1. /*
  2. * arch/sparc64/mm/init.c
  3. *
  4. * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
  5. * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  6. */
  7. #include <linux/module.h>
  8. #include <linux/kernel.h>
  9. #include <linux/sched.h>
  10. #include <linux/string.h>
  11. #include <linux/init.h>
  12. #include <linux/bootmem.h>
  13. #include <linux/mm.h>
  14. #include <linux/hugetlb.h>
  15. #include <linux/initrd.h>
  16. #include <linux/swap.h>
  17. #include <linux/pagemap.h>
  18. #include <linux/poison.h>
  19. #include <linux/fs.h>
  20. #include <linux/seq_file.h>
  21. #include <linux/kprobes.h>
  22. #include <linux/cache.h>
  23. #include <linux/sort.h>
  24. #include <linux/ioport.h>
  25. #include <linux/percpu.h>
  26. #include <linux/memblock.h>
  27. #include <linux/mmzone.h>
  28. #include <linux/gfp.h>
  29. #include <asm/head.h>
  30. #include <asm/page.h>
  31. #include <asm/pgalloc.h>
  32. #include <asm/pgtable.h>
  33. #include <asm/oplib.h>
  34. #include <asm/iommu.h>
  35. #include <asm/io.h>
  36. #include <asm/uaccess.h>
  37. #include <asm/mmu_context.h>
  38. #include <asm/tlbflush.h>
  39. #include <asm/dma.h>
  40. #include <asm/starfire.h>
  41. #include <asm/tlb.h>
  42. #include <asm/spitfire.h>
  43. #include <asm/sections.h>
  44. #include <asm/tsb.h>
  45. #include <asm/hypervisor.h>
  46. #include <asm/prom.h>
  47. #include <asm/mdesc.h>
  48. #include <asm/cpudata.h>
  49. #include <asm/setup.h>
  50. #include <asm/irq.h>
  51. #include "init_64.h"
  52. unsigned long kern_linear_pte_xor[4] __read_mostly;
  53. static unsigned long page_cache4v_flag;
  54. /* A bitmap, two bits for every 256MB of physical memory. These two
  55. * bits determine what page size we use for kernel linear
  56. * translations. They form an index into kern_linear_pte_xor[]. The
  57. * value in the indexed slot is XOR'd with the TLB miss virtual
  58. * address to form the resulting TTE. The mapping is:
  59. *
  60. * 0 ==> 4MB
  61. * 1 ==> 256MB
  62. * 2 ==> 2GB
  63. * 3 ==> 16GB
  64. *
  65. * All sun4v chips support 256MB pages. Only SPARC-T4 and later
  66. * support 2GB pages, and hopefully future cpus will support the 16GB
  67. * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there
  68. * if these larger page sizes are not supported by the cpu.
  69. *
  70. * It would be nice to determine this from the machine description
  71. * 'cpu' properties, but we need to have this table set up before the
  72. * MDESC is initialized.
  73. */
  74. #ifndef CONFIG_DEBUG_PAGEALLOC
  75. /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
  76. * Space is allocated for this right after the trap table in
  77. * arch/sparc64/kernel/head.S
  78. */
  79. extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
  80. #endif
  81. extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
  82. static unsigned long cpu_pgsz_mask;
  83. #define MAX_BANKS 1024
  84. static struct linux_prom64_registers pavail[MAX_BANKS];
  85. static int pavail_ents;
  86. static int cmp_p64(const void *a, const void *b)
  87. {
  88. const struct linux_prom64_registers *x = a, *y = b;
  89. if (x->phys_addr > y->phys_addr)
  90. return 1;
  91. if (x->phys_addr < y->phys_addr)
  92. return -1;
  93. return 0;
  94. }
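/* Read a /memory property from OBP into 'regs': page align each entry,
 * drop any that become empty, and sort the rest by physical address.
 */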
  95. static void __init read_obp_memory(const char *property,
  96. struct linux_prom64_registers *regs,
  97. int *num_ents)
  98. {
  99. phandle node = prom_finddevice("/memory");
  100. int prop_size = prom_getproplen(node, property);
  101. int ents, ret, i;
  102. ents = prop_size / sizeof(struct linux_prom64_registers);
  103. if (ents > MAX_BANKS) {
  104. prom_printf("The machine has more %s property entries than "
  105. "this kernel can support (%d).\n",
  106. property, MAX_BANKS);
  107. prom_halt();
  108. }
  109. ret = prom_getproperty(node, property, (char *) regs, prop_size);
  110. if (ret == -1) {
  111. prom_printf("Couldn't get %s property from /memory.\n",
  112. property);
  113. prom_halt();
  114. }
  115. /* Sanitize what we got from the firmware, by page aligning
  116. * everything.
  117. */
  118. for (i = 0; i < ents; i++) {
  119. unsigned long base, size;
  120. base = regs[i].phys_addr;
  121. size = regs[i].reg_size;
  122. size &= PAGE_MASK;
  123. if (base & ~PAGE_MASK) {
  124. unsigned long new_base = PAGE_ALIGN(base);
  125. size -= new_base - base;
  126. if ((long) size < 0L)
  127. size = 0UL;
  128. base = new_base;
  129. }
  130. if (size == 0UL) {
  131. /* If it is empty, simply get rid of it.
  132. * This simplifies the logic of the other
  133. * functions that process these arrays.
  134. */
  135. memmove(&regs[i], &regs[i + 1],
  136. (ents - i - 1) * sizeof(regs[0]));
  137. i--;
  138. ents--;
  139. continue;
  140. }
  141. regs[i].phys_addr = base;
  142. regs[i].reg_size = size;
  143. }
  144. *num_ents = ents;
  145. sort(regs, ents, sizeof(struct linux_prom64_registers),
  146. cmp_p64, NULL);
  147. }
  148. /* Kernel physical address base and size in bytes. */
  149. unsigned long kern_base __read_mostly;
  150. unsigned long kern_size __read_mostly;
  151. /* Initial ramdisk setup */
  152. extern unsigned long sparc_ramdisk_image64;
  153. extern unsigned int sparc_ramdisk_image;
  154. extern unsigned int sparc_ramdisk_size;
  155. struct page *mem_map_zero __read_mostly;
  156. EXPORT_SYMBOL(mem_map_zero);
  157. unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
  158. unsigned long sparc64_kern_pri_context __read_mostly;
  159. unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
  160. unsigned long sparc64_kern_sec_context __read_mostly;
  161. int num_kernel_image_mappings;
  162. #ifdef CONFIG_DEBUG_DCFLUSH
  163. atomic_t dcpage_flushes = ATOMIC_INIT(0);
  164. #ifdef CONFIG_SMP
  165. atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
  166. #endif
  167. #endif
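/* Flush the local cpu's D-cache lines for 'page'. sun4v cpus never need
 * this, so reaching here with tlb_type == hypervisor is a bug.
 */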
  168. inline void flush_dcache_page_impl(struct page *page)
  169. {
  170. BUG_ON(tlb_type == hypervisor);
  171. #ifdef CONFIG_DEBUG_DCFLUSH
  172. atomic_inc(&dcpage_flushes);
  173. #endif
  174. #ifdef DCACHE_ALIASING_POSSIBLE
  175. __flush_dcache_page(page_address(page),
  176. ((tlb_type == spitfire) &&
  177. page_mapping(page) != NULL));
  178. #else
  179. if (page_mapping(page) != NULL &&
  180. tlb_type == spitfire)
  181. __flush_icache_page(__pa(page_address(page)));
  182. #endif
  183. }
  184. #define PG_dcache_dirty PG_arch_1
  185. #define PG_dcache_cpu_shift 32UL
  186. #define PG_dcache_cpu_mask \
  187. ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
  188. #define dcache_dirty_cpu(page) \
  189. (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
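/* Mark 'page' as having dirty D-cache lines and record which cpu dirtied
 * it in the upper bits of page->flags. The casx loop retries until the
 * flags word is updated without losing a concurrent modification.
 */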
  190. static inline void set_dcache_dirty(struct page *page, int this_cpu)
  191. {
  192. unsigned long mask = this_cpu;
  193. unsigned long non_cpu_bits;
  194. non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
  195. mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
  196. __asm__ __volatile__("1:\n\t"
  197. "ldx [%2], %%g7\n\t"
  198. "and %%g7, %1, %%g1\n\t"
  199. "or %%g1, %0, %%g1\n\t"
  200. "casx [%2], %%g7, %%g1\n\t"
  201. "cmp %%g7, %%g1\n\t"
  202. "bne,pn %%xcc, 1b\n\t"
  203. " nop"
  204. : /* no outputs */
  205. : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
  206. : "g1", "g7");
  207. }
  208. static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
  209. {
  210. unsigned long mask = (1UL << PG_dcache_dirty);
  211. __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
  212. "1:\n\t"
  213. "ldx [%2], %%g7\n\t"
  214. "srlx %%g7, %4, %%g1\n\t"
  215. "and %%g1, %3, %%g1\n\t"
  216. "cmp %%g1, %0\n\t"
  217. "bne,pn %%icc, 2f\n\t"
  218. " andn %%g7, %1, %%g1\n\t"
  219. "casx [%2], %%g7, %%g1\n\t"
  220. "cmp %%g7, %%g1\n\t"
  221. "bne,pn %%xcc, 1b\n\t"
  222. " nop\n"
  223. "2:"
  224. : /* no outputs */
  225. : "r" (cpu), "r" (mask), "r" (&page->flags),
  226. "i" (PG_dcache_cpu_mask),
  227. "i" (PG_dcache_cpu_shift)
  228. : "g1", "g7");
  229. }
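/* Write a (tag, pte) pair into a TSB entry. cheetah_plus and sun4v TSBs
 * are accessed via physical addresses, so convert with __pa() first.
 */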
  230. static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
  231. {
  232. unsigned long tsb_addr = (unsigned long) ent;
  233. if (tlb_type == cheetah_plus || tlb_type == hypervisor)
  234. tsb_addr = __pa(tsb_addr);
  235. __tsb_insert(tsb_addr, tag, pte);
  236. }
  237. unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
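/* Lazily flush the D-cache for 'pfn': only pages marked PG_dcache_dirty
 * are flushed, locally or via a cross call to the cpu that dirtied them,
 * after which the dirty state is cleared.
 */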
  238. static void flush_dcache(unsigned long pfn)
  239. {
  240. struct page *page;
  241. page = pfn_to_page(pfn);
  242. if (page) {
  243. unsigned long pg_flags;
  244. pg_flags = page->flags;
  245. if (pg_flags & (1UL << PG_dcache_dirty)) {
  246. int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
  247. PG_dcache_cpu_mask);
  248. int this_cpu = get_cpu();
  249. /* This is just to optimize away some function calls
  250. * in the SMP case.
  251. */
  252. if (cpu == this_cpu)
  253. flush_dcache_page_impl(page);
  254. else
  255. smp_flush_dcache_page_impl(page, cpu);
  256. clear_dcache_dirty_cpu(page, cpu);
  257. put_cpu();
  258. }
  259. }
  260. }
  261. /* mm->context.lock must be held */
  262. static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
  263. unsigned long tsb_hash_shift, unsigned long address,
  264. unsigned long tte)
  265. {
  266. struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
  267. unsigned long tag;
  268. if (unlikely(!tsb))
  269. return;
  270. tsb += ((address >> tsb_hash_shift) &
  271. (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
  272. tag = (address >> 22UL);
  273. tsb_insert(tsb, tag, tte);
  274. }
  275. #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
  276. static inline bool is_hugetlb_pte(pte_t pte)
  277. {
  278. if ((tlb_type == hypervisor &&
  279. (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
  280. (tlb_type != hypervisor &&
  281. (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
  282. return true;
  283. return false;
  284. }
  285. #endif
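/* Called by the generic MM code after a PTE has been set. Flush the
 * D-cache for the backing page on sun4u, then preload the matching TSB
 * entry (base or huge page) so the next access avoids a TSB miss.
 */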
  286. void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
  287. {
  288. struct mm_struct *mm;
  289. unsigned long flags;
  290. pte_t pte = *ptep;
  291. if (tlb_type != hypervisor) {
  292. unsigned long pfn = pte_pfn(pte);
  293. if (pfn_valid(pfn))
  294. flush_dcache(pfn);
  295. }
  296. mm = vma->vm_mm;
  297. /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
  298. if (!pte_accessible(mm, pte))
  299. return;
  300. spin_lock_irqsave(&mm->context.lock, flags);
  301. #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
  302. if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
  303. __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
  304. address, pte_val(pte));
  305. else
  306. #endif
  307. __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
  308. address, pte_val(pte));
  309. spin_unlock_irqrestore(&mm->context.lock, flags);
  310. }
  311. void flush_dcache_page(struct page *page)
  312. {
  313. struct address_space *mapping;
  314. int this_cpu;
  315. if (tlb_type == hypervisor)
  316. return;
  317. /* Do not bother with the expensive D-cache flush if it
  318. * is merely the zero page. The 'bigcore' testcase in GDB
  319. * causes this case to run millions of times.
  320. */
  321. if (page == ZERO_PAGE(0))
  322. return;
  323. this_cpu = get_cpu();
  324. mapping = page_mapping(page);
  325. if (mapping && !mapping_mapped(mapping)) {
  326. int dirty = test_bit(PG_dcache_dirty, &page->flags);
  327. if (dirty) {
  328. int dirty_cpu = dcache_dirty_cpu(page);
  329. if (dirty_cpu == this_cpu)
  330. goto out;
  331. smp_flush_dcache_page_impl(page, dirty_cpu);
  332. }
  333. set_dcache_dirty(page, this_cpu);
  334. } else {
  335. /* We could delay the flush for the !page_mapping
  336. * case too. But that case is for exec env/arg
  337. * pages and those are 99% certain to get
  338. * faulted into the tlb (and thus flushed) anyway.
  339. */
  340. flush_dcache_page_impl(page);
  341. }
  342. out:
  343. put_cpu();
  344. }
  345. EXPORT_SYMBOL(flush_dcache_page);
  346. void __kprobes flush_icache_range(unsigned long start, unsigned long end)
  347. {
  348. /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
  349. if (tlb_type == spitfire) {
  350. unsigned long kaddr;
  351. /* This code only runs on Spitfire cpus so this is
  352. * why we can assume _PAGE_PADDR_4U.
  353. */
  354. for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
  355. unsigned long paddr, mask = _PAGE_PADDR_4U;
  356. if (kaddr >= PAGE_OFFSET)
  357. paddr = kaddr & mask;
  358. else {
  359. pgd_t *pgdp = pgd_offset_k(kaddr);
  360. pud_t *pudp = pud_offset(pgdp, kaddr);
  361. pmd_t *pmdp = pmd_offset(pudp, kaddr);
  362. pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
  363. paddr = pte_val(*ptep) & mask;
  364. }
  365. __flush_icache_page(paddr);
  366. }
  367. }
  368. }
  369. EXPORT_SYMBOL(flush_icache_range);
  370. void mmu_info(struct seq_file *m)
  371. {
  372. static const char *pgsz_strings[] = {
  373. "8K", "64K", "512K", "4MB", "32MB",
  374. "256MB", "2GB", "16GB",
  375. };
  376. int i, printed;
  377. if (tlb_type == cheetah)
  378. seq_printf(m, "MMU Type\t: Cheetah\n");
  379. else if (tlb_type == cheetah_plus)
  380. seq_printf(m, "MMU Type\t: Cheetah+\n");
  381. else if (tlb_type == spitfire)
  382. seq_printf(m, "MMU Type\t: Spitfire\n");
  383. else if (tlb_type == hypervisor)
  384. seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
  385. else
  386. seq_printf(m, "MMU Type\t: ???\n");
  387. seq_printf(m, "MMU PGSZs\t: ");
  388. printed = 0;
  389. for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
  390. if (cpu_pgsz_mask & (1UL << i)) {
  391. seq_printf(m, "%s%s",
  392. printed ? "," : "", pgsz_strings[i]);
  393. printed++;
  394. }
  395. }
  396. seq_putc(m, '\n');
  397. #ifdef CONFIG_DEBUG_DCFLUSH
  398. seq_printf(m, "DCPageFlushes\t: %d\n",
  399. atomic_read(&dcpage_flushes));
  400. #ifdef CONFIG_SMP
  401. seq_printf(m, "DCPageFlushesXC\t: %d\n",
  402. atomic_read(&dcpage_flushes_xcall));
  403. #endif /* CONFIG_SMP */
  404. #endif /* CONFIG_DEBUG_DCFLUSH */
  405. }
  406. struct linux_prom_translation prom_trans[512] __read_mostly;
  407. unsigned int prom_trans_ents __read_mostly;
  408. unsigned long kern_locked_tte_data;
  409. /* The obp translations are saved based on 8k pagesize, since obp can
  410. * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
  411. * HI_OBP_ADDRESS range are handled in ktlb.S.
  412. */
  413. static inline int in_obp_range(unsigned long vaddr)
  414. {
  415. return (vaddr >= LOW_OBP_ADDRESS &&
  416. vaddr < HI_OBP_ADDRESS);
  417. }
  418. static int cmp_ptrans(const void *a, const void *b)
  419. {
  420. const struct linux_prom_translation *x = a, *y = b;
  421. if (x->virt > y->virt)
  422. return 1;
  423. if (x->virt < y->virt)
  424. return -1;
  425. return 0;
  426. }
  427. /* Read OBP translations property into 'prom_trans[]'. */
  428. static void __init read_obp_translations(void)
  429. {
  430. int n, node, ents, first, last, i;
  431. node = prom_finddevice("/virtual-memory");
  432. n = prom_getproplen(node, "translations");
  433. if (unlikely(n == 0 || n == -1)) {
  434. prom_printf("prom_mappings: Couldn't get size.\n");
  435. prom_halt();
  436. }
  437. if (unlikely(n > sizeof(prom_trans))) {
  438. prom_printf("prom_mappings: Size %d is too big.\n", n);
  439. prom_halt();
  440. }
  441. if ((n = prom_getproperty(node, "translations",
  442. (char *)&prom_trans[0],
  443. sizeof(prom_trans))) == -1) {
  444. prom_printf("prom_mappings: Couldn't get property.\n");
  445. prom_halt();
  446. }
  447. n = n / sizeof(struct linux_prom_translation);
  448. ents = n;
  449. sort(prom_trans, ents, sizeof(struct linux_prom_translation),
  450. cmp_ptrans, NULL);
  451. /* Now kick out all the non-OBP entries. */
  452. for (i = 0; i < ents; i++) {
  453. if (in_obp_range(prom_trans[i].virt))
  454. break;
  455. }
  456. first = i;
  457. for (; i < ents; i++) {
  458. if (!in_obp_range(prom_trans[i].virt))
  459. break;
  460. }
  461. last = i;
  462. for (i = 0; i < (last - first); i++) {
  463. struct linux_prom_translation *src = &prom_trans[i + first];
  464. struct linux_prom_translation *dest = &prom_trans[i];
  465. *dest = *src;
  466. }
  467. for (; i < ents; i++) {
  468. struct linux_prom_translation *dest = &prom_trans[i];
  469. dest->virt = dest->size = dest->data = 0x0UL;
  470. }
  471. prom_trans_ents = last - first;
  472. if (tlb_type == spitfire) {
  473. /* Clear diag TTE bits. */
  474. for (i = 0; i < prom_trans_ents; i++)
  475. prom_trans[i].data &= ~0x0003fe0000000000UL;
  476. }
  477. /* Force execute bit on. */
  478. for (i = 0; i < prom_trans_ents; i++)
  479. prom_trans[i].data |= (tlb_type == hypervisor ?
  480. _PAGE_EXEC_4V : _PAGE_EXEC_4U);
  481. }
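/* Ask the sun4v hypervisor for a permanent (locked) translation of
 * 'vaddr' with the given TTE in the requested MMU (I-MMU or D-MMU).
 * A failure here is fatal, so report it and halt.
 */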
  482. static void __init hypervisor_tlb_lock(unsigned long vaddr,
  483. unsigned long pte,
  484. unsigned long mmu)
  485. {
  486. unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
  487. if (ret != 0) {
  488. prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
  489. "errors with %lx\n", vaddr, 0, pte, mmu, ret);
  490. prom_halt();
  491. }
  492. }
  493. static unsigned long kern_large_tte(unsigned long paddr);
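/* Lock the kernel image into the TLB. Build a large-page TTE for the
 * kernel's physical base and map each 4MB chunk of the image, using
 * permanent hypervisor mappings on sun4v or locked OBP dTLB/iTLB
 * entries otherwise.
 */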
  494. static void __init remap_kernel(void)
  495. {
  496. unsigned long phys_page, tte_vaddr, tte_data;
  497. int i, tlb_ent = sparc64_highest_locked_tlbent();
  498. tte_vaddr = (unsigned long) KERNBASE;
  499. phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
  500. tte_data = kern_large_tte(phys_page);
  501. kern_locked_tte_data = tte_data;
  502. /* Now lock us into the TLBs via Hypervisor or OBP. */
  503. if (tlb_type == hypervisor) {
  504. for (i = 0; i < num_kernel_image_mappings; i++) {
  505. hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
  506. hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
  507. tte_vaddr += 0x400000;
  508. tte_data += 0x400000;
  509. }
  510. } else {
  511. for (i = 0; i < num_kernel_image_mappings; i++) {
  512. prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
  513. prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
  514. tte_vaddr += 0x400000;
  515. tte_data += 0x400000;
  516. }
  517. sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
  518. }
  519. if (tlb_type == cheetah_plus) {
  520. sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
  521. CTX_CHEETAH_PLUS_NUC);
  522. sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
  523. sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
  524. }
  525. }
  526. static void __init inherit_prom_mappings(void)
  527. {
  528. /* Now fix up OBP's idea about where we really are mapped. */
  529. printk("Remapping the kernel... ");
  530. remap_kernel();
  531. printk("done.\n");
  532. }
  533. void prom_world(int enter)
  534. {
  535. if (!enter)
  536. set_fs(get_fs());
  537. __asm__ __volatile__("flushw");
  538. }
  539. void __flush_dcache_range(unsigned long start, unsigned long end)
  540. {
  541. unsigned long va;
  542. if (tlb_type == spitfire) {
  543. int n = 0;
  544. for (va = start; va < end; va += 32) {
  545. spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
  546. if (++n >= 512)
  547. break;
  548. }
  549. } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
  550. start = __pa(start);
  551. end = __pa(end);
  552. for (va = start; va < end; va += 32)
  553. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  554. "membar #Sync"
  555. : /* no outputs */
  556. : "r" (va),
  557. "i" (ASI_DCACHE_INVALIDATE));
  558. }
  559. }
  560. EXPORT_SYMBOL(__flush_dcache_range);
  561. /* get_new_mmu_context() uses "cache + 1". */
  562. DEFINE_SPINLOCK(ctx_alloc_lock);
  563. unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
  564. #define MAX_CTX_NR (1UL << CTX_NR_BITS)
  565. #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
  566. DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
  567. /* Caller does TLB context flushing on local CPU if necessary.
  568. * The caller also ensures that CTX_VALID(mm->context) is false.
  569. *
  570. * We must be careful about boundary cases so that we never
  571. * let the user have CTX 0 (nucleus), nor ever use a CTX
  572. * version of zero (and thus NO_CONTEXT would not be caught
  573. * by version mis-match tests in mmu_context.h).
  574. *
  575. * Always invoked with interrupts disabled.
  576. */
  577. void get_new_mmu_context(struct mm_struct *mm)
  578. {
  579. unsigned long ctx, new_ctx;
  580. unsigned long orig_pgsz_bits;
  581. int new_version;
  582. spin_lock(&ctx_alloc_lock);
  583. orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
  584. ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
  585. new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
  586. new_version = 0;
  587. if (new_ctx >= (1 << CTX_NR_BITS)) {
  588. new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
  589. if (new_ctx >= ctx) {
  590. int i;
  591. new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
  592. CTX_FIRST_VERSION;
  593. if (new_ctx == 1)
  594. new_ctx = CTX_FIRST_VERSION;
  595. /* Don't call memset, for 16 entries that's just
  596. * plain silly...
  597. */
  598. mmu_context_bmap[0] = 3;
  599. mmu_context_bmap[1] = 0;
  600. mmu_context_bmap[2] = 0;
  601. mmu_context_bmap[3] = 0;
  602. for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
  603. mmu_context_bmap[i + 0] = 0;
  604. mmu_context_bmap[i + 1] = 0;
  605. mmu_context_bmap[i + 2] = 0;
  606. mmu_context_bmap[i + 3] = 0;
  607. }
  608. new_version = 1;
  609. goto out;
  610. }
  611. }
  612. mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
  613. new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
  614. out:
  615. tlb_context_cache = new_ctx;
  616. mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
  617. spin_unlock(&ctx_alloc_lock);
  618. if (unlikely(new_version))
  619. smp_new_mmu_context_version();
  620. }
  621. static int numa_enabled = 1;
  622. static int numa_debug;
  623. static int __init early_numa(char *p)
  624. {
  625. if (!p)
  626. return 0;
  627. if (strstr(p, "off"))
  628. numa_enabled = 0;
  629. if (strstr(p, "debug"))
  630. numa_debug = 1;
  631. return 0;
  632. }
  633. early_param("numa", early_numa);
  634. #define numadbg(f, a...) \
  635. do { if (numa_debug) \
  636. printk(KERN_INFO f, ## a); \
  637. } while (0)
  638. static void __init find_ramdisk(unsigned long phys_base)
  639. {
  640. #ifdef CONFIG_BLK_DEV_INITRD
  641. if (sparc_ramdisk_image || sparc_ramdisk_image64) {
  642. unsigned long ramdisk_image;
  643. /* Older versions of the bootloader only supported a
  644. * 32-bit physical address for the ramdisk image
  645. * location, stored at sparc_ramdisk_image. Newer
  646. * SILO versions set sparc_ramdisk_image to zero and
  647. * provide a full 64-bit physical address at
  648. * sparc_ramdisk_image64.
  649. */
  650. ramdisk_image = sparc_ramdisk_image;
  651. if (!ramdisk_image)
  652. ramdisk_image = sparc_ramdisk_image64;
  653. /* Another bootloader quirk. The bootloader normalizes
  654. * the physical address to KERNBASE, so we have to
  655. * factor that back out and add in the lowest valid
  656. * physical page address to get the true physical address.
  657. */
  658. ramdisk_image -= KERNBASE;
  659. ramdisk_image += phys_base;
  660. numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
  661. ramdisk_image, sparc_ramdisk_size);
  662. initrd_start = ramdisk_image;
  663. initrd_end = ramdisk_image + sparc_ramdisk_size;
  664. memblock_reserve(initrd_start, sparc_ramdisk_size);
  665. initrd_start += PAGE_OFFSET;
  666. initrd_end += PAGE_OFFSET;
  667. }
  668. #endif
  669. }
  670. struct node_mem_mask {
  671. unsigned long mask;
  672. unsigned long val;
  673. };
  674. static struct node_mem_mask node_masks[MAX_NUMNODES];
  675. static int num_node_masks;
  676. #ifdef CONFIG_NEED_MULTIPLE_NODES
  677. int numa_cpu_lookup_table[NR_CPUS];
  678. cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
  679. struct mdesc_mblock {
  680. u64 base;
  681. u64 size;
  682. u64 offset; /* RA-to-PA */
  683. };
  684. static struct mdesc_mblock *mblocks;
  685. static int num_mblocks;
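/* Translate a real address into a physical address by applying the
 * RA-to-PA offset of the mblock containing it, if any.
 */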
  686. static unsigned long ra_to_pa(unsigned long addr)
  687. {
  688. int i;
  689. for (i = 0; i < num_mblocks; i++) {
  690. struct mdesc_mblock *m = &mblocks[i];
  691. if (addr >= m->base &&
  692. addr < (m->base + m->size)) {
  693. addr += m->offset;
  694. break;
  695. }
  696. }
  697. return addr;
  698. }
  699. static int find_node(unsigned long addr)
  700. {
  701. int i;
  702. addr = ra_to_pa(addr);
  703. for (i = 0; i < num_node_masks; i++) {
  704. struct node_mem_mask *p = &node_masks[i];
  705. if ((addr & p->mask) == p->val)
  706. return i;
  707. }
  708. /* The following condition has been observed on LDOM guests.*/
  709. WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
  710. " rule. Some physical memory will be owned by node 0.");
  711. return 0;
  712. }
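/* Scan [start, end) a page at a time and return the end of the initial
 * run of pages that belong to the same NUMA node as 'start'; that node
 * id is returned through 'nid'.
 */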
  713. static u64 memblock_nid_range(u64 start, u64 end, int *nid)
  714. {
  715. *nid = find_node(start);
  716. start += PAGE_SIZE;
  717. while (start < end) {
  718. int n = find_node(start);
  719. if (n != *nid)
  720. break;
  721. start += PAGE_SIZE;
  722. }
  723. if (start > end)
  724. start = end;
  725. return start;
  726. }
  727. #endif
  728. /* This must be invoked after performing all of the necessary
  729. * memblock_set_node() calls for 'nid'. We need to be able to get
  730. * correct data from get_pfn_range_for_nid().
  731. */
  732. static void __init allocate_node_data(int nid)
  733. {
  734. struct pglist_data *p;
  735. unsigned long start_pfn, end_pfn;
  736. #ifdef CONFIG_NEED_MULTIPLE_NODES
  737. unsigned long paddr;
  738. paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
  739. if (!paddr) {
  740. prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
  741. prom_halt();
  742. }
  743. NODE_DATA(nid) = __va(paddr);
  744. memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
  745. NODE_DATA(nid)->node_id = nid;
  746. #endif
  747. p = NODE_DATA(nid);
  748. get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
  749. p->node_start_pfn = start_pfn;
  750. p->node_spanned_pages = end_pfn - start_pfn;
  751. }
  752. static void init_node_masks_nonnuma(void)
  753. {
  754. #ifdef CONFIG_NEED_MULTIPLE_NODES
  755. int i;
  756. #endif
  757. numadbg("Initializing tables for non-numa.\n");
  758. node_masks[0].mask = node_masks[0].val = 0;
  759. num_node_masks = 1;
  760. #ifdef CONFIG_NEED_MULTIPLE_NODES
  761. for (i = 0; i < NR_CPUS; i++)
  762. numa_cpu_lookup_table[i] = 0;
  763. cpumask_setall(&numa_cpumask_lookup_table[0]);
  764. #endif
  765. }
  766. #ifdef CONFIG_NEED_MULTIPLE_NODES
  767. struct pglist_data *node_data[MAX_NUMNODES];
  768. EXPORT_SYMBOL(numa_cpu_lookup_table);
  769. EXPORT_SYMBOL(numa_cpumask_lookup_table);
  770. EXPORT_SYMBOL(node_data);
  771. struct mdesc_mlgroup {
  772. u64 node;
  773. u64 latency;
  774. u64 match;
  775. u64 mask;
  776. };
  777. static struct mdesc_mlgroup *mlgroups;
  778. static int num_mlgroups;
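/* Return 0 if the PIO node 'pio' has a forward arc to a node whose
 * "cfg-handle" property matches 'cfg_handle', -ENODEV otherwise.
 */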
  779. static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
  780. u32 cfg_handle)
  781. {
  782. u64 arc;
  783. mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
  784. u64 target = mdesc_arc_target(md, arc);
  785. const u64 *val;
  786. val = mdesc_get_property(md, target,
  787. "cfg-handle", NULL);
  788. if (val && *val == cfg_handle)
  789. return 0;
  790. }
  791. return -ENODEV;
  792. }
  793. static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
  794. u32 cfg_handle)
  795. {
  796. u64 arc, candidate, best_latency = ~(u64)0;
  797. candidate = MDESC_NODE_NULL;
  798. mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
  799. u64 target = mdesc_arc_target(md, arc);
  800. const char *name = mdesc_node_name(md, target);
  801. const u64 *val;
  802. if (strcmp(name, "pio-latency-group"))
  803. continue;
  804. val = mdesc_get_property(md, target, "latency", NULL);
  805. if (!val)
  806. continue;
  807. if (*val < best_latency) {
  808. candidate = target;
  809. best_latency = *val;
  810. }
  811. }
  812. if (candidate == MDESC_NODE_NULL)
  813. return -ENODEV;
  814. return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
  815. }
  816. int of_node_to_nid(struct device_node *dp)
  817. {
  818. const struct linux_prom64_registers *regs;
  819. struct mdesc_handle *md;
  820. u32 cfg_handle;
  821. int count, nid;
  822. u64 grp;
  823. /* This is the right thing to do on currently supported
  824. * SUN4U NUMA platforms as well, as the PCI controller does
  825. * not sit behind any particular memory controller.
  826. */
  827. if (!mlgroups)
  828. return -1;
  829. regs = of_get_property(dp, "reg", NULL);
  830. if (!regs)
  831. return -1;
  832. cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
  833. md = mdesc_grab();
  834. count = 0;
  835. nid = -1;
  836. mdesc_for_each_node_by_name(md, grp, "group") {
  837. if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
  838. nid = count;
  839. break;
  840. }
  841. count++;
  842. }
  843. mdesc_release(md);
  844. return nid;
  845. }
  846. static void __init add_node_ranges(void)
  847. {
  848. struct memblock_region *reg;
  849. for_each_memblock(memory, reg) {
  850. unsigned long size = reg->size;
  851. unsigned long start, end;
  852. start = reg->base;
  853. end = start + size;
  854. while (start < end) {
  855. unsigned long this_end;
  856. int nid;
  857. this_end = memblock_nid_range(start, end, &nid);
  858. numadbg("Setting memblock NUMA node nid[%d] "
  859. "start[%lx] end[%lx]\n",
  860. nid, start, this_end);
  861. memblock_set_node(start, this_end - start,
  862. &memblock.memory, nid);
  863. start = this_end;
  864. }
  865. }
  866. }
  867. static int __init grab_mlgroups(struct mdesc_handle *md)
  868. {
  869. unsigned long paddr;
  870. int count = 0;
  871. u64 node;
  872. mdesc_for_each_node_by_name(md, node, "memory-latency-group")
  873. count++;
  874. if (!count)
  875. return -ENOENT;
  876. paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
  877. SMP_CACHE_BYTES);
  878. if (!paddr)
  879. return -ENOMEM;
  880. mlgroups = __va(paddr);
  881. num_mlgroups = count;
  882. count = 0;
  883. mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
  884. struct mdesc_mlgroup *m = &mlgroups[count++];
  885. const u64 *val;
  886. m->node = node;
  887. val = mdesc_get_property(md, node, "latency", NULL);
  888. m->latency = *val;
  889. val = mdesc_get_property(md, node, "address-match", NULL);
  890. m->match = *val;
  891. val = mdesc_get_property(md, node, "address-mask", NULL);
  892. m->mask = *val;
  893. numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
  894. "match[%llx] mask[%llx]\n",
  895. count - 1, m->node, m->latency, m->match, m->mask);
  896. }
  897. return 0;
  898. }
  899. static int __init grab_mblocks(struct mdesc_handle *md)
  900. {
  901. unsigned long paddr;
  902. int count = 0;
  903. u64 node;
  904. mdesc_for_each_node_by_name(md, node, "mblock")
  905. count++;
  906. if (!count)
  907. return -ENOENT;
  908. paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
  909. SMP_CACHE_BYTES);
  910. if (!paddr)
  911. return -ENOMEM;
  912. mblocks = __va(paddr);
  913. num_mblocks = count;
  914. count = 0;
  915. mdesc_for_each_node_by_name(md, node, "mblock") {
  916. struct mdesc_mblock *m = &mblocks[count++];
  917. const u64 *val;
  918. val = mdesc_get_property(md, node, "base", NULL);
  919. m->base = *val;
  920. val = mdesc_get_property(md, node, "size", NULL);
  921. m->size = *val;
  922. val = mdesc_get_property(md, node,
  923. "address-congruence-offset", NULL);
  924. /* The address-congruence-offset property is optional.
  925. * Explicitly zero it to identify this case.
  926. */
  927. if (val)
  928. m->offset = *val;
  929. else
  930. m->offset = 0UL;
  931. numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
  932. count - 1, m->base, m->size, m->offset);
  933. }
  934. return 0;
  935. }
  936. static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
  937. u64 grp, cpumask_t *mask)
  938. {
  939. u64 arc;
  940. cpumask_clear(mask);
  941. mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
  942. u64 target = mdesc_arc_target(md, arc);
  943. const char *name = mdesc_node_name(md, target);
  944. const u64 *id;
  945. if (strcmp(name, "cpu"))
  946. continue;
  947. id = mdesc_get_property(md, target, "id", NULL);
  948. if (*id < nr_cpu_ids)
  949. cpumask_set_cpu(*id, mask);
  950. }
  951. }
  952. static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
  953. {
  954. int i;
  955. for (i = 0; i < num_mlgroups; i++) {
  956. struct mdesc_mlgroup *m = &mlgroups[i];
  957. if (m->node == node)
  958. return m;
  959. }
  960. return NULL;
  961. }
  962. static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
  963. int index)
  964. {
  965. struct mdesc_mlgroup *candidate = NULL;
  966. u64 arc, best_latency = ~(u64)0;
  967. struct node_mem_mask *n;
  968. mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
  969. u64 target = mdesc_arc_target(md, arc);
  970. struct mdesc_mlgroup *m = find_mlgroup(target);
  971. if (!m)
  972. continue;
  973. if (m->latency < best_latency) {
  974. candidate = m;
  975. best_latency = m->latency;
  976. }
  977. }
  978. if (!candidate)
  979. return -ENOENT;
  980. if (num_node_masks != index) {
  981. printk(KERN_ERR "Inconsistent NUMA state, "
  982. "index[%d] != num_node_masks[%d]\n",
  983. index, num_node_masks);
  984. return -EINVAL;
  985. }
  986. n = &node_masks[num_node_masks++];
  987. n->mask = candidate->mask;
  988. n->val = candidate->match;
  989. numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
  990. index, n->mask, n->val, candidate->latency);
  991. return 0;
  992. }
  993. static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
  994. int index)
  995. {
  996. cpumask_t mask;
  997. int cpu;
  998. numa_parse_mdesc_group_cpus(md, grp, &mask);
  999. for_each_cpu(cpu, &mask)
  1000. numa_cpu_lookup_table[cpu] = index;
  1001. cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
  1002. if (numa_debug) {
  1003. printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
  1004. for_each_cpu(cpu, &mask)
  1005. printk("%d ", cpu);
  1006. printk("]\n");
  1007. }
  1008. return numa_attach_mlgroup(md, grp, index);
  1009. }
  1010. static int __init numa_parse_mdesc(void)
  1011. {
  1012. struct mdesc_handle *md = mdesc_grab();
  1013. int i, err, count;
  1014. u64 node;
  1015. node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
  1016. if (node == MDESC_NODE_NULL) {
  1017. mdesc_release(md);
  1018. return -ENOENT;
  1019. }
  1020. err = grab_mblocks(md);
  1021. if (err < 0)
  1022. goto out;
  1023. err = grab_mlgroups(md);
  1024. if (err < 0)
  1025. goto out;
  1026. count = 0;
  1027. mdesc_for_each_node_by_name(md, node, "group") {
  1028. err = numa_parse_mdesc_group(md, node, count);
  1029. if (err < 0)
  1030. break;
  1031. count++;
  1032. }
  1033. add_node_ranges();
  1034. for (i = 0; i < num_node_masks; i++) {
  1035. allocate_node_data(i);
  1036. node_set_online(i);
  1037. }
  1038. err = 0;
  1039. out:
  1040. mdesc_release(md);
  1041. return err;
  1042. }
  1043. static int __init numa_parse_jbus(void)
  1044. {
  1045. unsigned long cpu, index;
  1046. /* NUMA node id is encoded in bits 36 and higher, and there is
  1047. * a 1-to-1 mapping from CPU ID to NUMA node ID.
  1048. */
  1049. index = 0;
  1050. for_each_present_cpu(cpu) {
  1051. numa_cpu_lookup_table[cpu] = index;
  1052. cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
  1053. node_masks[index].mask = ~((1UL << 36UL) - 1UL);
  1054. node_masks[index].val = cpu << 36UL;
  1055. index++;
  1056. }
  1057. num_node_masks = index;
  1058. add_node_ranges();
  1059. for (index = 0; index < num_node_masks; index++) {
  1060. allocate_node_data(index);
  1061. node_set_online(index);
  1062. }
  1063. return 0;
  1064. }
  1065. static int __init numa_parse_sun4u(void)
  1066. {
  1067. if (tlb_type == cheetah || tlb_type == cheetah_plus) {
  1068. unsigned long ver;
  1069. __asm__ ("rdpr %%ver, %0" : "=r" (ver));
  1070. if ((ver >> 32UL) == __JALAPENO_ID ||
  1071. (ver >> 32UL) == __SERRANO_ID)
  1072. return numa_parse_jbus();
  1073. }
  1074. return -1;
  1075. }
  1076. static int __init bootmem_init_numa(void)
  1077. {
  1078. int err = -1;
  1079. numadbg("bootmem_init_numa()\n");
  1080. if (numa_enabled) {
  1081. if (tlb_type == hypervisor)
  1082. err = numa_parse_mdesc();
  1083. else
  1084. err = numa_parse_sun4u();
  1085. }
  1086. return err;
  1087. }
  1088. #else
  1089. static int bootmem_init_numa(void)
  1090. {
  1091. return -1;
  1092. }
  1093. #endif
  1094. static void __init bootmem_init_nonnuma(void)
  1095. {
  1096. unsigned long top_of_ram = memblock_end_of_DRAM();
  1097. unsigned long total_ram = memblock_phys_mem_size();
  1098. numadbg("bootmem_init_nonnuma()\n");
  1099. printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
  1100. top_of_ram, total_ram);
  1101. printk(KERN_INFO "Memory hole size: %ldMB\n",
  1102. (top_of_ram - total_ram) >> 20);
  1103. init_node_masks_nonnuma();
  1104. memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
  1105. allocate_node_data(0);
  1106. node_set_online(0);
  1107. }
  1108. static unsigned long __init bootmem_init(unsigned long phys_base)
  1109. {
  1110. unsigned long end_pfn;
  1111. end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
  1112. max_pfn = max_low_pfn = end_pfn;
  1113. min_low_pfn = (phys_base >> PAGE_SHIFT);
  1114. if (bootmem_init_numa() < 0)
  1115. bootmem_init_nonnuma();
  1116. /* Dump memblock with node info. */
  1117. memblock_dump_all();
  1118. /* XXX cpu notifier XXX */
  1119. sparse_memory_present_with_active_regions(MAX_NUMNODES);
  1120. sparse_init();
  1121. return end_pfn;
  1122. }
  1123. static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
  1124. static int pall_ents __initdata;
  1125. static unsigned long max_phys_bits = 40;
  1126. bool kern_addr_valid(unsigned long addr)
  1127. {
  1128. pgd_t *pgd;
  1129. pud_t *pud;
  1130. pmd_t *pmd;
  1131. pte_t *pte;
  1132. if ((long)addr < 0L) {
  1133. unsigned long pa = __pa(addr);
  1134. if ((addr >> max_phys_bits) != 0UL)
  1135. return false;
  1136. return pfn_valid(pa >> PAGE_SHIFT);
  1137. }
  1138. if (addr >= (unsigned long) KERNBASE &&
  1139. addr < (unsigned long)&_end)
  1140. return true;
  1141. pgd = pgd_offset_k(addr);
  1142. if (pgd_none(*pgd))
  1143. return 0;
  1144. pud = pud_offset(pgd, addr);
  1145. if (pud_none(*pud))
  1146. return 0;
  1147. if (pud_large(*pud))
  1148. return pfn_valid(pud_pfn(*pud));
  1149. pmd = pmd_offset(pud, addr);
  1150. if (pmd_none(*pmd))
  1151. return 0;
  1152. if (pmd_large(*pmd))
  1153. return pfn_valid(pmd_pfn(*pmd));
  1154. pte = pte_offset_kernel(pmd, addr);
  1155. if (pte_none(*pte))
  1156. return 0;
  1157. return pfn_valid(pte_pfn(*pte));
  1158. }
  1159. EXPORT_SYMBOL(kern_addr_valid);
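/* Map [vstart, vend) at the PUD level using the 2GB or 16GB linear page
 * size encodings from kern_linear_pte_xor[], chosen from the alignment
 * and size of the range. Returns the virtual address where mapping
 * should continue.
 */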
  1160. static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
  1161. unsigned long vend,
  1162. pud_t *pud)
  1163. {
  1164. const unsigned long mask16gb = (1UL << 34) - 1UL;
  1165. u64 pte_val = vstart;
  1166. /* Each PUD is 8GB */
  1167. if ((vstart & mask16gb) ||
  1168. (vend - vstart <= mask16gb)) {
  1169. pte_val ^= kern_linear_pte_xor[2];
  1170. pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
  1171. return vstart + PUD_SIZE;
  1172. }
  1173. pte_val ^= kern_linear_pte_xor[3];
  1174. pte_val |= _PAGE_PUD_HUGE;
  1175. vend = vstart + mask16gb + 1UL;
  1176. while (vstart < vend) {
  1177. pud_val(*pud) = pte_val;
  1178. pte_val += PUD_SIZE;
  1179. vstart += PUD_SIZE;
  1180. pud++;
  1181. }
  1182. return vstart;
  1183. }
  1184. static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
  1185. bool guard)
  1186. {
  1187. if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
  1188. return true;
  1189. return false;
  1190. }
  1191. static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
  1192. unsigned long vend,
  1193. pmd_t *pmd)
  1194. {
  1195. const unsigned long mask256mb = (1UL << 28) - 1UL;
  1196. const unsigned long mask2gb = (1UL << 31) - 1UL;
  1197. u64 pte_val = vstart;
  1198. /* Each PMD is 8MB */
  1199. if ((vstart & mask256mb) ||
  1200. (vend - vstart <= mask256mb)) {
  1201. pte_val ^= kern_linear_pte_xor[0];
  1202. pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
  1203. return vstart + PMD_SIZE;
  1204. }
  1205. if ((vstart & mask2gb) ||
  1206. (vend - vstart <= mask2gb)) {
  1207. pte_val ^= kern_linear_pte_xor[1];
  1208. pte_val |= _PAGE_PMD_HUGE;
  1209. vend = vstart + mask256mb + 1UL;
  1210. } else {
  1211. pte_val ^= kern_linear_pte_xor[2];
  1212. pte_val |= _PAGE_PMD_HUGE;
  1213. vend = vstart + mask2gb + 1UL;
  1214. }
  1215. while (vstart < vend) {
  1216. pmd_val(*pmd) = pte_val;
  1217. pte_val += PMD_SIZE;
  1218. vstart += PMD_SIZE;
  1219. pmd++;
  1220. }
  1221. return vstart;
  1222. }
  1223. static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
  1224. bool guard)
  1225. {
  1226. if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
  1227. return true;
  1228. return false;
  1229. }
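/* Build kernel linear mappings for the physical range [pstart, pend),
 * allocating page table pages from bootmem as needed and using huge PUD
 * or PMD mappings when 'use_huge' permits. Returns the number of bytes
 * allocated for page tables.
 */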
  1230. static unsigned long __ref kernel_map_range(unsigned long pstart,
  1231. unsigned long pend, pgprot_t prot,
  1232. bool use_huge)
  1233. {
  1234. unsigned long vstart = PAGE_OFFSET + pstart;
  1235. unsigned long vend = PAGE_OFFSET + pend;
  1236. unsigned long alloc_bytes = 0UL;
  1237. if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
  1238. prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
  1239. vstart, vend);
  1240. prom_halt();
  1241. }
  1242. while (vstart < vend) {
  1243. unsigned long this_end, paddr = __pa(vstart);
  1244. pgd_t *pgd = pgd_offset_k(vstart);
  1245. pud_t *pud;
  1246. pmd_t *pmd;
  1247. pte_t *pte;
  1248. if (pgd_none(*pgd)) {
  1249. pud_t *new;
  1250. new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
  1251. alloc_bytes += PAGE_SIZE;
  1252. pgd_populate(&init_mm, pgd, new);
  1253. }
  1254. pud = pud_offset(pgd, vstart);
  1255. if (pud_none(*pud)) {
  1256. pmd_t *new;
  1257. if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
  1258. vstart = kernel_map_hugepud(vstart, vend, pud);
  1259. continue;
  1260. }
  1261. new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
  1262. alloc_bytes += PAGE_SIZE;
  1263. pud_populate(&init_mm, pud, new);
  1264. }
  1265. pmd = pmd_offset(pud, vstart);
  1266. if (pmd_none(*pmd)) {
  1267. pte_t *new;
  1268. if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
  1269. vstart = kernel_map_hugepmd(vstart, vend, pmd);
  1270. continue;
  1271. }
  1272. new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
  1273. alloc_bytes += PAGE_SIZE;
  1274. pmd_populate_kernel(&init_mm, pmd, new);
  1275. }
  1276. pte = pte_offset_kernel(pmd, vstart);
  1277. this_end = (vstart + PMD_SIZE) & PMD_MASK;
  1278. if (this_end > vend)
  1279. this_end = vend;
  1280. while (vstart < this_end) {
  1281. pte_val(*pte) = (paddr | pgprot_val(prot));
  1282. vstart += PAGE_SIZE;
  1283. paddr += PAGE_SIZE;
  1284. pte++;
  1285. }
  1286. }
  1287. return alloc_bytes;
  1288. }
  1289. static void __init flush_all_kernel_tsbs(void)
  1290. {
  1291. int i;
  1292. for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
  1293. struct tsb *ent = &swapper_tsb[i];
  1294. ent->tag = (1UL << TSB_TAG_INVALID_BIT);
  1295. }
  1296. #ifndef CONFIG_DEBUG_PAGEALLOC
  1297. for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
  1298. struct tsb *ent = &swapper_4m_tsb[i];
  1299. ent->tag = (1UL << TSB_TAG_INVALID_BIT);
  1300. }
  1301. #endif
  1302. }
  1303. extern unsigned int kvmap_linear_patch[1];
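/* Create kernel page tables for every physical memory bank in pall[],
 * patch the kvmap_linear_patch site to a nop, then flush the kernel
 * TSBs and the TLB so the new mappings take effect.
 */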
  1304. static void __init kernel_physical_mapping_init(void)
  1305. {
  1306. unsigned long i, mem_alloced = 0UL;
  1307. bool use_huge = true;
  1308. #ifdef CONFIG_DEBUG_PAGEALLOC
  1309. use_huge = false;
  1310. #endif
  1311. for (i = 0; i < pall_ents; i++) {
  1312. unsigned long phys_start, phys_end;
  1313. phys_start = pall[i].phys_addr;
  1314. phys_end = phys_start + pall[i].reg_size;
  1315. mem_alloced += kernel_map_range(phys_start, phys_end,
  1316. PAGE_KERNEL, use_huge);
  1317. }
  1318. printk("Allocated %ld bytes for kernel page tables.\n",
  1319. mem_alloced);
  1320. kvmap_linear_patch[0] = 0x01000000; /* nop */
  1321. flushi(&kvmap_linear_patch[0]);
  1322. flush_all_kernel_tsbs();
  1323. __flush_tlb_all();
  1324. }
  1325. #ifdef CONFIG_DEBUG_PAGEALLOC
  1326. void __kernel_map_pages(struct page *page, int numpages, int enable)
  1327. {
  1328. unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
  1329. unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
  1330. kernel_map_range(phys_start, phys_end,
  1331. (enable ? PAGE_KERNEL : __pgprot(0)), false);
  1332. flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
  1333. PAGE_OFFSET + phys_end);
  1334. /* we should perform an IPI and flush all tlbs,
  1335. * but that can deadlock, so flush only the current cpu.
  1336. */
  1337. __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
  1338. PAGE_OFFSET + phys_end);
  1339. }
  1340. #endif
  1341. unsigned long __init find_ecache_flush_span(unsigned long size)
  1342. {
  1343. int i;
  1344. for (i = 0; i < pavail_ents; i++) {
  1345. if (pavail[i].reg_size >= size)
  1346. return pavail[i].phys_addr;
  1347. }
  1348. return ~0UL;
  1349. }
  1350. unsigned long PAGE_OFFSET;
  1351. EXPORT_SYMBOL(PAGE_OFFSET);
  1352. unsigned long VMALLOC_END = 0x0000010000000000UL;
  1353. EXPORT_SYMBOL(VMALLOC_END);
  1354. unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
  1355. unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
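/* Choose PAGE_OFFSET, the virtual address hole boundaries and
 * max_phys_bits according to the cpu type, then derive VMALLOC_END from
 * the bottom of the VA hole.
 */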
static void __init setup_page_offset(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		/* Cheetah/Panther support a full 64-bit virtual
		 * address, so we can use all that our page tables
		 * support.
		 */
		sparc64_va_hole_top = 0xfff0000000000000UL;
		sparc64_va_hole_bottom = 0x0010000000000000UL;

		max_phys_bits = 42;
	} else if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
		case SUN4V_CHIP_NIAGARA2:
			/* T1 and T2 support 48-bit virtual addresses. */
			sparc64_va_hole_top = 0xffff800000000000UL;
			sparc64_va_hole_bottom = 0x0000800000000000UL;

			max_phys_bits = 39;
			break;
		case SUN4V_CHIP_NIAGARA3:
			/* T3 supports 48-bit virtual addresses. */
			sparc64_va_hole_top = 0xffff800000000000UL;
			sparc64_va_hole_bottom = 0x0000800000000000UL;

			max_phys_bits = 43;
			break;
		case SUN4V_CHIP_NIAGARA4:
		case SUN4V_CHIP_NIAGARA5:
		case SUN4V_CHIP_SPARC64X:
		case SUN4V_CHIP_SPARC_M6:
			/* T4 and later support 52-bit virtual addresses. */
			sparc64_va_hole_top = 0xfff8000000000000UL;
			sparc64_va_hole_bottom = 0x0008000000000000UL;

			max_phys_bits = 47;
			break;
		case SUN4V_CHIP_SPARC_M7:
		default:
			/* M7 and later support 52-bit virtual addresses. */
			sparc64_va_hole_top = 0xfff8000000000000UL;
			sparc64_va_hole_bottom = 0x0008000000000000UL;

			max_phys_bits = 49;
			break;
		}
	}

	if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
		prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
			    max_phys_bits);
		prom_halt();
	}

	PAGE_OFFSET = sparc64_va_hole_top;
	VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
		       (sparc64_va_hole_bottom >> 2));

	pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
		PAGE_OFFSET, max_phys_bits);
	pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
		VMALLOC_START, VMALLOC_END);
	pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
		VMEMMAP_BASE, VMEMMAP_BASE << 1);
}
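/* Patch the TSB access instructions recorded in the __tsb_*_phys_patch
 * tables, selecting the sun4v or sun4u quad-load variant as appropriate,
 * and flush the I-cache for every word that is rewritten.
 */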
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor. */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
/* The swapper TSBs are loaded with a base sequence of:
 *
 *	sethi	%uhi(SYMBOL), REG1
 *	sethi	%hi(SYMBOL), REG2
 *	or	REG1, %ulo(SYMBOL), REG1
 *	or	REG2, %lo(SYMBOL), REG2
 *	sllx	REG1, 32, REG1
 *	or	REG1, REG2, REG1
 *
 * When we use physical addressing for the TSB accesses, we patch the
 * first four instructions in the above sequence.
 */
static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	unsigned long high_bits, low_bits;

	high_bits = (pa >> 32) & 0xffffffff;
	low_bits = (pa >> 0) & 0xffffffff;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
		__asm__ __volatile__("flush %0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
		__asm__ __volatile__("flush %0" : : "r" (ia + 1));

		ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
		__asm__ __volatile__("flush %0" : : "r" (ia + 2));

		ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
		__asm__ __volatile__("flush %0" : : "r" (ia + 3));

		start++;
	}
}

static void ktsb_phys_patch(void)
{
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end, ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	{
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
	}
#endif
}
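/* Fill in the hypervisor TSB descriptors for the kernel: one for
 * PAGE_SIZE mappings and, unless DEBUG_PAGEALLOC is set, a second one
 * covering the 4MB and larger linear-mapping page sizes.
 */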
static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings. */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;
	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;
	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;
	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
				    HV_PGSZ_MASK_256MB |
				    HV_PGSZ_MASK_2GB |
				    HV_PGSZ_MASK_16GB) &
				   cpu_pgsz_mask);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}
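/* Hand the kernel TSB descriptors to the hypervisor for context 0;
 * there is no recovery if the call fails, so halt via the PROM.
 */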
void sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}
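/* kern_linear_pte_xor[] holds the PTE bits that are combined with a
 * linear-mapping virtual address to form the corresponding TTE; the
 * entries below index the linear-mapping page sizes (4MB/256MB/2GB/16GB
 * on sun4v).
 */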
static void __init sun4u_linear_pte_xor_finalize(void)
{
#ifndef CONFIG_DEBUG_PAGEALLOC
	/* This is where we would add Panther support for
	 * 32MB and 256MB pages.
	 */
#endif
}

static void __init sun4v_linear_pte_xor_finalize(void)
{
	unsigned long pagecv_flag;

	/* Bit 9 of TTE is no longer CV bit on M7 processor and it
	 * instead enables MCD error. Do not set bit 9 on M7 processor.
	 */
	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
		pagecv_flag = 0x00;
		break;
	default:
		pagecv_flag = _PAGE_CV_4V;
		break;
	}
#ifndef CONFIG_DEBUG_PAGEALLOC
	if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
		kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
	}

	if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
		kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
	}

	if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
		kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
	}
#endif
}

/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);
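/* Sum the sizes of all free memblock ranges; reduce_memory() uses this
 * when trimming RAM down to the command line limit (cmdline_memory_size).
 */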
static phys_addr_t __init available_memory(void)
{
	phys_addr_t available = 0ULL;
	phys_addr_t pa_start, pa_end;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL)
		available = available + (pa_end - pa_start);

	return available;
}

#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	(_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	(_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

/* We need to exclude reserved regions. This exclusion will include
 * vmlinux and initrd. To be more precise the initrd size could be used to
 * compute a new lower limit because it is freed later during initialization.
 */
static void __init reduce_memory(phys_addr_t limit_ram)
{
	phys_addr_t avail_ram = available_memory();
	phys_addr_t pa_start, pa_end;
	u64 i;

	if (limit_ram >= avail_ram)
		return;

	for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL) {
		phys_addr_t region_size = pa_end - pa_start;
		phys_addr_t clip_start = pa_start;

		avail_ram = avail_ram - region_size;
		/* Are we consuming too much? */
		if (avail_ram < limit_ram) {
			phys_addr_t give_back = limit_ram - avail_ram;

			region_size = region_size - give_back;
			clip_start = clip_start + give_back;
		}

		memblock_remove(clip_start, region_size);

		if (avail_ram <= limit_ram)
			break;

		i = 0UL;
	}
}
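/* Main MM bring-up: size the virtual address space, read the memory
 * layout from OpenFirmware, register RAM with memblock, build the
 * kernel page tables and TSBs, then set up bootmem and the zone lists.
 */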
void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;
	int node;

	setup_page_offset();

	/* These build time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		     ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs. */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	/* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
	 * bit on M7 processor. This is a conflicting usage of the same
	 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
	 * Detection error on all pages and this will lead to problems
	 * later. Kernel does not run with MCD enabled and hence rest
	 * of the required steps to fully configure memory corruption
	 * detection are not taken. We need to ensure TTE.mcde is not
	 * set on M7 processor. Compute the value of cacheability
	 * flag for use later taking this into consideration.
	 */
	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
		page_cache4v_flag = _PAGE_CP_4V;
		break;
	default:
		page_cache4v_flag = _PAGE_CACHE_4V;
		break;
	}

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor) {
		tsb_phys_patch();
		ktsb_phys_patch();
	}

	if (tlb_type == hypervisor)
		sun4v_patch_tlb_handlers();

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	memblock_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	if (cmdline_memory_size)
		reduce_memory(cmdline_memory_size);

	memblock_allow_resize();
	memblock_dump_all();

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely. */
	setup_tba();

	__flush_tlb_all();

	prom_build_devicetree();
	of_populate_present_mask();
#ifndef CONFIG_SMP
	of_fill_in_cpu_data();
#endif

	if (tlb_type == hypervisor) {
		sun4v_mdesc_init();
		mdesc_populate_present_mask(cpu_all_mask);
#ifndef CONFIG_SMP
		mdesc_fill_in_cpu_data(cpu_all_mask);
#endif
		mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);

		sun4v_linear_pte_xor_finalize();

		sun4v_ktsb_init();
		sun4v_ktsb_register();
	} else {
		unsigned long impl, ver;

		cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
				 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);

		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
		impl = ((ver >> 32) & 0xffff);
		if (impl == PANTHER_IMPL)
			cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
					  HV_PGSZ_MASK_256MB);

		sun4u_linear_pte_xor_finalize();
	}

	/* Flush the TLBs and the 4M TSB so that the updated linear
	 * pte XOR settings are realized for all mappings.
	 */
	__flush_tlb_all();
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif
	__flush_tlb_all();

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		node = cpu_to_node(i);

		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
	}

	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}
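/* Return 1 if the physical address lies in OBP-available RAM, inside
 * the kernel image, or inside the initrd; 0 otherwise.
 */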
int page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif
	return 0;
}

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;

	for_each_online_node(i)
		if (NODE_DATA(i)->node_spanned_pages)
			register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

	register_page_bootmem_info();
	free_all_bootmem();

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	mark_page_reserved(mem_map_zero);

	mem_init_print_info(NULL);

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free)
			free_reserved_page(virt_to_page(page));
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
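/* Populate the vmemmap virtual range with PMD-sized (4MB) mappings,
 * allocating page table levels and backing blocks on the given node.
 */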
int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
			       int node)
{
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);

	pte_base |= _PAGE_PMD_HUGE;

	vstart = vstart & PMD_MASK;
	vend = ALIGN(vend, PMD_SIZE);
	for (; vstart < vend; vstart += PMD_SIZE) {
		pgd_t *pgd = pgd_offset_k(vstart);
		unsigned long pte;
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd)) {
			pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);

			if (!new)
				return -ENOMEM;
			pgd_populate(&init_mm, pgd, new);
		}

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);

			if (!new)
				return -ENOMEM;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);

		pte = pmd_val(*pmd);
		if (!(pte & _PAGE_VALID)) {
			void *block = vmemmap_alloc_block(PMD_SIZE, node);

			if (!block)
				return -ENOMEM;
			pmd_val(*pmd) = pte_base | __pa(block);
		}
	}

	return 0;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
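/* Fill in protection_map[] from the computed protection values.  The
 * index encodes the VM_READ/VM_WRITE/VM_EXEC bits of a mapping, with
 * the upper half of the table used for shared mappings.
 */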
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}

static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				page_cache4v_flag | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = page_cache4v_flag;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
				   _PAGE_W_4V);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
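/* Translate a page size in bytes into the corresponding TTE size-field
 * bits, using the sun4v or sun4u encoding depending on tlb_type.
 */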
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}

pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
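/* Build the 4MB TTE used for the locked kernel image mappings: valid,
 * cacheable, privileged, executable and writable, using the sun4v bit
 * layout when running under the hypervisor.
 */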
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       page_cache4v_flag | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
			    unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	pte_t *pte = NULL;

	if (page)
		pte = (pte_t *) page_address(page);

	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm,
			unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		free_hot_cold_page(page, 0);
		return NULL;
	}
	return (pte_t *) page_address(page);
}

void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static void __pte_free(pgtable_t pte)
{
	struct page *page = virt_to_page(pte);

	pgtable_page_dtor(page);
	__free_page(page);
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	__pte_free(pte);
}

void pgtable_free(void *table, bool is_page)
{
	if (is_page)
		__pte_free(table);
	else
		kmem_cache_free(pgtable_cache, table);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	unsigned long pte, flags;
	struct mm_struct *mm;
	pmd_t entry = *pmd;

	if (!pmd_large(entry) || !pmd_young(entry))
		return;

	pte = pmd_val(entry);

	/* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
	if (!(pte & _PAGE_VALID))
		return;

	/* We are fabricating 8MB pages using 4MB real hw pages. */
	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));

	mm = vma->vm_mm;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					addr, pte);

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

void hugetlb_setup(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct tsb_config *tp;

	if (in_atomic() || !mm) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
		die_if_kernel("HugeTSB in atomic", regs);
	}

	tp = &mm->context.tsb_block[MM_TSB_HUGE];
	if (likely(tp->tsb == NULL))
		tsb_grow(mm, MM_TSB_HUGE, 0);

	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}
#endif
static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

static inline resource_size_t compute_kern_paddr(void *addr)
{
	return (resource_size_t) (addr - KERNBASE + kern_base);
}

static void __init kernel_lds_init(void)
{
	code_resource.start = compute_kern_paddr(_text);
	code_resource.end = compute_kern_paddr(_etext - 1);
	data_resource.start = compute_kern_paddr(_etext);
	data_resource.end = compute_kern_paddr(_edata - 1);
	bss_resource.start = compute_kern_paddr(__bss_start);
	bss_resource.end = compute_kern_paddr(_end - 1);
}

static int __init report_memory(void)
{
	int i;
	struct resource *res;

	kernel_lds_init();

	for (i = 0; i < pavail_ents; i++) {
		res = kzalloc(sizeof(struct resource), GFP_KERNEL);

		if (!res) {
			pr_warn("Failed to allocate resource.\n");
			break;
		}

		res->name = "System RAM";
		res->start = pavail[i].phys_addr;
		res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;

		if (insert_resource(&iomem_resource, res) < 0) {
			pr_warn("Resource insertion failed.\n");
			break;
		}

		insert_resource(res, &code_resource);
		insert_resource(res, &data_resource);
		insert_resource(res, &bss_resource);
	}

	return 0;
}
arch_initcall(report_memory);
#ifdef CONFIG_SMP
#define do_flush_tlb_kernel_range	smp_flush_tlb_kernel_range
#else
#define do_flush_tlb_kernel_range	__flush_tlb_kernel_range
#endif
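/* Flush the kernel TSB and TLB for a virtual range, but skip over the
 * [LOW_OBP_ADDRESS, HI_OBP_ADDRESS) window so that OpenFirmware's own
 * mappings are left untouched.
 */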
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
		if (start < LOW_OBP_ADDRESS) {
			flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
		}
		if (end > HI_OBP_ADDRESS) {
			flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
		}
	} else {
		flush_tsb_kernel_range(start, end);
		do_flush_tlb_kernel_range(start, end);
	}
}