/* pda.h -- x86_64 per-CPU data area (PDA) definitions */
#ifndef X86_64_PDA_H
#define X86_64_PDA_H

/* C declarations only -- the trailing #defines below are also visible
   to assembly, so everything else is fenced by __ASSEMBLY__. */
#ifndef __ASSEMBLY__
#include <linux/stddef.h>	/* offsetof(), used by pda_offset() */
#include <linux/types.h>
#include <linux/cache.h>	/* ____cacheline_aligned_in_smp */
#include <asm/page.h>
/* Per processor datastructure. %gs points to it while the kernel runs */
/* NOTE(review): the numeric comments are byte offsets relied on by
   assembly and by the gcc stack-protector ABI -- do NOT reorder fields. */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long data_offset;	/* 8  Per cpu data offset from linker
					      address */
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 Irq nesting counter. Starts -1 */
	unsigned int cpunumber;		/* 36 Logical CPU number */
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! */
	char *irqstackptr;		/* per-CPU interrupt stack pointer */
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* number of NMI on this CPUs */
	short mmu_state;		/* presumably lazy-TLB state -- verify
					   against tlbflush users */
	short isidle;
	struct mm_struct *active_mm;
	unsigned apic_timer_irqs;	/* per-CPU interrupt counters below */
	unsigned irq0_irqs;
	unsigned irq_resched_count;
	unsigned irq_call_count;
	unsigned irq_tlb_count;
	unsigned irq_thermal_count;
	unsigned irq_threshold_count;
	unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
/* Per-CPU PDA pointers, indexed by logical CPU number; boot_cpu_pda
   holds the statically allocated copies used before full init. */
extern struct x8664_pda *_cpu_pda[];
extern struct x8664_pda boot_cpu_pda[];
extern void pda_init(int);

#define cpu_pda(i) (_cpu_pda[i])

/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
 */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;

/* Byte offset of a PDA field, usable as an "i" asm operand. */
#define pda_offset(field) offsetof(struct x8664_pda, field)
  50. #define pda_to_op(op, field, val) \
  51. do { \
  52. typedef typeof(_proxy_pda.field) T__; \
  53. if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
  54. switch (sizeof(_proxy_pda.field)) { \
  55. case 2: \
  56. asm(op "w %1,%%gs:%c2" : \
  57. "+m" (_proxy_pda.field) : \
  58. "ri" ((T__)val), \
  59. "i"(pda_offset(field))); \
  60. break; \
  61. case 4: \
  62. asm(op "l %1,%%gs:%c2" : \
  63. "+m" (_proxy_pda.field) : \
  64. "ri" ((T__)val), \
  65. "i" (pda_offset(field))); \
  66. break; \
  67. case 8: \
  68. asm(op "q %1,%%gs:%c2": \
  69. "+m" (_proxy_pda.field) : \
  70. "ri" ((T__)val), \
  71. "i"(pda_offset(field))); \
  72. break; \
  73. default: \
  74. __bad_pda_field(); \
  75. } \
  76. } while (0)
/*
 * Read-style PDA access: load a PDA field through %gs with "op"
 * (normally "mov") and yield it as the value of the statement
 * expression.  Operand size is picked from sizeof the field; any other
 * size calls __bad_pda_field() (declared noreturn above).  The dummy
 * "m" (_proxy_pda.field) input keeps gcc's dependency tracking honest,
 * mirroring the "+m" operand in pda_to_op().
 */
#define pda_from_op(op, field)						\
({									\
	typeof(_proxy_pda.field) ret__;					\
	switch (sizeof(_proxy_pda.field)) {				\
	case 2:								\
		asm(op "w %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	case 4:								\
		asm(op "l %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	case 8:								\
		asm(op "q %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)),				\
		    "m" (_proxy_pda.field));				\
		break;							\
	default:							\
		__bad_pda_field();					\
	}								\
	ret__;								\
})
/* Convenience wrappers around the generic PDA accessors above. */
#define read_pda(field)		pda_from_op("mov", field)
#define write_pda(field, val)	pda_to_op("mov", field, val)
#define add_pda(field, val)	pda_to_op("add", field, val)
#define sub_pda(field, val)	pda_to_op("sub", field, val)
#define or_pda(field, val)	pda_to_op("or", field, val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
/* btr copies the old bit into CF and clears it; "sbbl %0,%0" then turns
   CF into 0 / -1 in old__, so the result is nonzero iff the bit was set. */
#define test_and_clear_bit_pda(bit, field)				\
({									\
	int old__;							\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
		     : "=r" (old__), "+m" (_proxy_pda.field)		\
		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
	old__;								\
})
#endif	/* !__ASSEMBLY__ */

/* Byte offset used from assembly -- NOTE(review): 5*8 == 40; confirm
   which PDA slot/stack offset the asm users expect this to be. */
#define PDA_STACKOFFSET (5*8)

/* Reload the stack-protector canary (PDA offset 40) from current.
   Expands write_pda() at the use site, so defining it outside the
   __ASSEMBLY__ fence is harmless. */
#define refresh_stack_canary() write_pda(stack_canary, current->stack_canary)

#endif	/* X86_64_PDA_H */