/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
  8. #include <linux/kernel.h>
  9. #include <linux/errno.h>
  10. #include <linux/types.h>
  11. #include <linux/mm.h>
  12. #include <linux/gfp.h>
  13. #include <linux/init.h>
  14. #include <asm/page-states.h>
  15. static int cmma_flag = 1;
  16. static int __init cmma(char *str)
  17. {
  18. char *parm;
  19. parm = strstrip(str);
  20. if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
  21. cmma_flag = 1;
  22. return 1;
  23. }
  24. cmma_flag = 0;
  25. if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
  26. return 1;
  27. return 0;
  28. }
  29. __setup("cmma=", cmma);
/*
 * Probe whether the ESSA instruction (opcode 0xb9ab) is available.
 *
 * rc starts out as -EOPNOTSUPP; if ESSA executes without a program
 * check, the "la %0,0" at label 0 clears it to 0.  If ESSA is not
 * available, the resulting exception is caught by the EX_TABLE entry
 * and execution resumes at label 1 with rc still -EOPNOTSUPP.
 *
 * Returns 0 when ESSA works, -EOPNOTSUPP otherwise.
 */
static inline int cmma_test_essa(void)
{
	/* Fixed registers: tmp in r0 is a scratch operand, rc in r1. */
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	asm volatile(
		" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0: la %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	return rc;
}
  42. void __init cmma_init(void)
  43. {
  44. if (!cmma_flag)
  45. return;
  46. if (cmma_test_essa())
  47. cmma_flag = 0;
  48. }
/*
 * Tag each of the 2^order pages starting at @page as unused via ESSA
 * with the ESSA_SET_UNUSED state, hinting to the host that their
 * contents need not be preserved.  rc receives the previous state and
 * is intentionally discarded.
 */
static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}
  58. void arch_free_page(struct page *page, int order)
  59. {
  60. if (!cmma_flag)
  61. return;
  62. set_page_unstable(page, order);
  63. }
/*
 * Tag each of the 2^order pages starting at @page as stable via ESSA
 * with the ESSA_SET_STABLE state, telling the host the page contents
 * must be preserved.  rc receives the previous state and is
 * intentionally discarded.
 */
static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}
  73. void arch_alloc_page(struct page *page, int order)
  74. {
  75. if (!cmma_flag)
  76. return;
  77. set_page_stable(page, order);
  78. }
  79. void arch_set_page_states(int make_stable)
  80. {
  81. unsigned long flags, order, t;
  82. struct list_head *l;
  83. struct page *page;
  84. struct zone *zone;
  85. if (!cmma_flag)
  86. return;
  87. if (make_stable)
  88. drain_local_pages(NULL);
  89. for_each_populated_zone(zone) {
  90. spin_lock_irqsave(&zone->lock, flags);
  91. for_each_migratetype_order(order, t) {
  92. list_for_each(l, &zone->free_area[order].free_list[t]) {
  93. page = list_entry(l, struct page, lru);
  94. if (make_stable)
  95. set_page_stable(page, order);
  96. else
  97. set_page_unstable(page, order);
  98. }
  99. }
  100. spin_unlock_irqrestore(&zone->lock, flags);
  101. }
  102. }