/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}
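
/*
 * Illustrative only: the mk_pmb_*() helpers build the memory-mapped
 * register address for a given PMB slot. Assuming the usual SH-4A
 * layout (entries at a fixed stride of 1 << PMB_E_SHIFT), slot 2 is
 * accessed at
 *
 *	mk_pmb_addr(2) == PMB_ADDR | (2 << PMB_E_SHIFT)
 *	mk_pmb_data(2) == PMB_DATA | (2 << PMB_E_SHIFT)
 *
 * i.e. every slot has one entry in the address array and one in the
 * data array, offset from the PMB_ADDR/PMB_DATA bases.
 */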

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}
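
/*
 * Rough sketch of the translation above, for reference only (exact
 * pgprot bit names vary with the MMU configuration):
 *
 *	cacheable, copy-back  (_PAGE_CACHABLE)             -> PMB_C
 *	cacheable, write-thru (_PAGE_CACHABLE | _PAGE_WT)  -> PMB_C | PMB_WT | PMB_UB
 *	uncached              (neither bit set)            -> 0
 */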

static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}
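
/*
 * Illustrative sketch (not taken from a real caller): reserving a
 * software descriptor for a 64MB mapping, letting the allocator pick
 * any free hardware slot, and releasing it again:
 *
 *	pmbe = pmb_alloc(vaddr, phys, pmb_flags | PMB_SZ_64M, PMB_NO_ENTRY);
 *	if (IS_ERR(pmbe))
 *		return PTR_ERR(pmbe);
 *	...
 *	pmb_free(pmbe);
 *
 * pmb_alloc() only claims the slot and fills in software state; nothing
 * is written to the hardware until __set_pmb_entry() runs.
 */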

static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	/* Set V-bit */
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}

int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	return 0;
}

void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long pmb_flags;
	int i, mapped;
	unsigned long orig_addr, vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	pmbp = NULL;
	pmb_flags = pgprot_to_pmb_flags(prot);
	mapped = 0;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	orig_addr = vaddr = (unsigned long)area->addr;

	if (!pmb_addr_valid(vaddr, aligned))
		return ERR_PTR(-EFAULT);

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		unsigned long flags;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			pmb_unmap_entry(pmbp, mapped);
			return pmbe;
		}

		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = pmb_sizes[i].size;

		__set_pmb_entry(pmbe);

		phys	+= pmbe->size;
		vaddr	+= pmbe->size;
		size	-= pmbe->size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp)) {
			spin_lock(&pmbp->lock);
			pmbp->link = pmbe;
			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
		mapped++;

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	if (size >= SZ_16M)
		goto again;

	return (void __iomem *)(offset + (char *)orig_addr);
}
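
/*
 * Worked example (illustrative numbers only): remapping 160MB starting
 * at a 128MB-aligned physical base. The greedy loop above always retries
 * the largest section size that still fits, so the request is covered by
 * three PMB entries:
 *
 *	pass 1: 128MB entry, 32MB remaining
 *	pass 2:  16MB entry, 16MB remaining
 *	pass 3:  16MB entry,  0 remaining
 *
 * The entries are chained via pmb_entry::link so that pmb_unmap() can
 * later tear the whole compound mapping down from the head entry.
 */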

int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}
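
/*
 * Sketch of the expected pairing (hypothetical caller, error handling
 * elided; in practice these are reached via the arch ioremap()/iounmap()
 * paths rather than called directly):
 *
 *	void __iomem *p;
 *
 *	p = pmb_remap_caller(phys, SZ_64M, PAGE_KERNEL_NOCACHE,
 *			     __builtin_return_address(0));
 *	...
 *	pmb_unmap(p);
 *
 * pmb_unmap() looks the mapping up by the head entry's vpn and then
 * tears down the whole linked chain via pmb_unmap_entry().
 */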

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}
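
/*
 * Illustrative scenario (hypothetical boot loader state, addresses made
 * up): suppose the loader left two valid 128MB entries behind,
 *
 *	slot 0: vpn 0x80000000 -> ppn 0x08000000, cached
 *	slot 1: vpn 0x88000000 -> ppn 0x10000000, cached
 *
 * pmb_synchronize() re-reads both slots, rewrites their cache bits to
 * match the kernel's CONFIG_CACHE_* selection, allocates matching
 * software descriptors with pmb_alloc(vpn, ppn, flags, i), and, since
 * slot 1 is virtually and physically contiguous with slot 0
 * (0x80000000 + 128MB == 0x88000000, 0x08000000 + 128MB == 0x10000000)
 * with identical flags, links them so that pmb_coalesce() can later
 * consider merging the chain (a merge only happens when the combined
 * span is itself a valid PMB size, e.g. four such 128MB entries
 * forming 512MB).
 */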

static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}
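
/*
 * Worked example (illustrative): a chain of four contiguous 128MB
 * entries. Walking the links gives running spans of 256MB, 384MB and
 * 512MB; only 512MB is a valid PMB section size, so newsize ends up as
 * 512MB with depth == 3. The three tail entries are then torn down and
 * the head entry is rewritten in hardware as a single 512MB mapping.
 */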

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size	= SZ_16M;
		pmbe->flags	&= ~PMB_SZ_MASK;
		pmbe->flags	|= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety vpn ppn size flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);
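
/*
 * With debugfs mounted in the usual place (path assumes the standard
 * mount point and the "sh" debugfs root used elsewhere in arch/sh),
 * the table registered above can be dumped from userspace; the output
 * follows the format strings in pmb_seq_show(), e.g. one line per
 * slot such as the "02: V 0x88 0x08 128MB C CB  B" example shown there:
 *
 *	# cat /sys/kernel/debug/sh/pmb
 */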

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend	= pmb_sysdev_suspend,
	.resume		= pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif