/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>

#include <asm/atomic.h>
#include <asm/hw_irq.h>
/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
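/*
 * Illustrative sketch (not part of the original header): how the
 * write/read barrier pair above is typically used to publish data from
 * one CPU to another.  The names "buf", "len" and "ready" are made up
 * for this example.
 *
 *	producer:			consumer:
 *	buf[0] = byte;			while (!ready)
 *	len = n;				cpu_relax();
 *	smp_wmb();			smp_rmb();
 *	ready = 1;			consume(buf, len);
 *
 * smp_wmb() keeps the payload stores from being reordered after the
 * store to "ready"; smp_rmb() keeps the payload loads from being
 * reordered before the load of "ready".  The non-smp_ variants are
 * needed when ordering against I/O devices, not just other CPUs.
 */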
#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif
extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void chrp_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);
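/*
 * __switch_to() returns the task that was running before the switch;
 * switch_to() assigns it to "last" so the code that resumes after the
 * switch knows which task it came from.
 */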
extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
	struct thread_struct *next);

extern unsigned int rtas_data;
/*
 * Atomically exchange *p with val using a lwarx/stwcx. loop: lwarx
 * loads the word and sets a reservation on it, stwcx. stores only if
 * the reservation is still held, and bne- retries if it was lost.
 * PPC405_ERR77() inserts a workaround for PPC405 erratum #77 on
 * affected parts.
 */
static __inline__ unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long) xchg_u32(ptr, x);
#if 0	/* xchg_u64 doesn't exist on 32-bit PPC */
	case 8:
		return (unsigned long) xchg_u64(ptr, x);
#endif	/* 0 */
	}
	__xchg_called_with_bad_pointer();
	return x;
}
extern inline void * xchg_ptr(void * m, void * val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}
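/*
 * Illustrative sketch (not part of the original header): tas() as a
 * primitive test-and-set flag.  "busy" is a made-up variable; real
 * code should use the spinlock API instead.
 *
 *	static volatile unsigned long busy;
 *
 *	while (tas(&busy))		- old value 1: already held
 *		cpu_relax();
 *	... critical section ...
 *	busy = 0;			- a real unlock needs a barrier first
 *
 * xchg() and tas() return the previous value, so seeing 0 means this
 * CPU is the one that set the flag.
 */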
#define __HAVE_ARCH_CMPXCHG	1

/*
 * Compare *p with old and, only if they match, store new; return the
 * value actually found in *p.  cmpw/bne skip the store on a mismatch,
 * and bne- after stwcx. retries if the reservation was lost.  On SMP
 * the trailing sync orders a successful exchange before subsequent
 * memory accesses.
 */
static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned int prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
#ifdef CONFIG_SMP
"	sync\n"
#endif /* CONFIG_SMP */
	"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 on 32-bit PPC */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif	/* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
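/*
 * Illustrative sketch (not part of the original header): the usual
 * compare-and-swap retry loop, here bumping a counter without a lock.
 * "counter" is a made-up variable.
 *
 *	static unsigned int counter;
 *	unsigned int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * The store happened only if cmpxchg() returned the expected old
 * value; otherwise another CPU got in first and the loop retries.
 */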
#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */
#endif /* __PPC_SYSTEM_H */