/*
 * definition for kernel virtual machines on s390
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H

#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/seqlock.h>
#include <asm/debug.h>
#include <asm/cpu.h>
#include <asm/fpu/api.h>
#include <asm/isc.h>
#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
#define KVM_MAX_VCPUS KVM_S390_ESCA_CPU_SLOTS
#define KVM_USER_MEM_SLOTS 32

/*
 * These seem to be used for allocating ->chip in the routing table,
 * which we don't use. 4096 is an out-of-thin-air value. If we need
 * to look at ->chip later on, we'll need to revisit this.
 */
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096
#define KVM_HALT_POLL_NS_DEFAULT 80000

/* s390-specific vcpu->requests bit members */
#define KVM_REQ_ENABLE_IBS 8
#define KVM_REQ_DISABLE_IBS 9

#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
union bsca_sigp_ctrl {
	__u8 value;
	struct {
		__u8 c : 1;
		__u8 r : 1;
		__u8 scn : 6;
	};
} __packed;

union esca_sigp_ctrl {
	__u16 value;
	struct {
		__u8 c : 1;
		__u8 reserved : 7;
		__u8 scn;
	};
} __packed;

struct esca_entry {
	union esca_sigp_ctrl sigp_ctrl;
	__u16 reserved1[3];
	__u64 sda;
	__u64 reserved2[6];
} __packed;

struct bsca_entry {
	__u8 reserved0;
	union bsca_sigp_ctrl sigp_ctrl;
	__u16 reserved[3];
	__u64 sda;
	__u64 reserved2[2];
} __attribute__((packed));

union ipte_control {
	unsigned long val;
	struct {
		unsigned long k : 1;
		unsigned long kh : 31;
		unsigned long kg : 32;
	};
};

struct bsca_block {
	union ipte_control ipte_control;
	__u64 reserved[5];
	__u64 mcn;
	__u64 reserved2;
	struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
} __attribute__((packed));

struct esca_block {
	union ipte_control ipte_control;
	__u64 reserved1[7];
	__u64 mcn[4];
	__u64 reserved2[20];
	struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
} __packed;
#define CPUSTAT_STOPPED    0x80000000
#define CPUSTAT_WAIT       0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
#define CPUSTAT_STOP_INT   0x04000000
#define CPUSTAT_IO_INT     0x02000000
#define CPUSTAT_EXT_INT    0x01000000
#define CPUSTAT_RUNNING    0x00800000
#define CPUSTAT_RETAINED   0x00400000
#define CPUSTAT_TIMING_SUB 0x00020000
#define CPUSTAT_SIE_SUB    0x00010000
#define CPUSTAT_RRF        0x00008000
#define CPUSTAT_SLSV       0x00004000
#define CPUSTAT_SLSR       0x00002000
#define CPUSTAT_ZARCH      0x00000800
#define CPUSTAT_MCDS       0x00000100
#define CPUSTAT_SM         0x00000080
#define CPUSTAT_IBS        0x00000040
#define CPUSTAT_GED2       0x00000010
#define CPUSTAT_G          0x00000008
#define CPUSTAT_GED        0x00000004
#define CPUSTAT_J          0x00000002
#define CPUSTAT_P          0x00000001
struct kvm_s390_sie_block {
	atomic_t cpuflags;		/* 0x0000 */
	__u32 : 1;			/* 0x0004 */
	__u32 prefix : 18;
	__u32 : 1;
	__u32 ibc : 12;
	__u8 reserved08[4];		/* 0x0008 */
#define PROG_IN_SIE (1<<0)
	__u32 prog0c;			/* 0x000c */
	__u8 reserved10[16];		/* 0x0010 */
#define PROG_BLOCK_SIE (1<<0)
#define PROG_REQUEST   (1<<1)
	atomic_t prog20;		/* 0x0020 */
	__u8 reserved24[4];		/* 0x0024 */
	__u64 cputm;			/* 0x0028 */
	__u64 ckc;			/* 0x0030 */
	__u64 epoch;			/* 0x0038 */
	__u8 reserved40[4];		/* 0x0040 */
#define LCTL_CR0  0x8000
#define LCTL_CR6  0x0200
#define LCTL_CR9  0x0040
#define LCTL_CR10 0x0020
#define LCTL_CR11 0x0010
#define LCTL_CR14 0x0002
	__u16 lctl;			/* 0x0044 */
	__s16 icpua;			/* 0x0046 */
#define ICTL_OPEREXC 0x80000000
#define ICTL_PINT    0x20000000
#define ICTL_LPSW    0x00400000
#define ICTL_STCTL   0x00040000
#define ICTL_ISKE    0x00004000
#define ICTL_SSKE    0x00002000
#define ICTL_RRBE    0x00001000
#define ICTL_TPROT   0x00000200
	__u32 ictl;			/* 0x0048 */
	__u32 eca;			/* 0x004c */
#define ICPT_INST      0x04
#define ICPT_PROGI     0x08
#define ICPT_INSTPROGI 0x0C
#define ICPT_OPEREXC   0x2C
#define ICPT_PARTEXEC  0x38
#define ICPT_IOINST    0x40
	__u8 icptcode;			/* 0x0050 */
	__u8 icptstatus;		/* 0x0051 */
	__u16 ihcpu;			/* 0x0052 */
	__u8 reserved54[2];		/* 0x0054 */
	__u16 ipa;			/* 0x0056 */
	__u32 ipb;			/* 0x0058 */
	__u32 scaoh;			/* 0x005c */
	__u8 reserved60;		/* 0x0060 */
	__u8 ecb;			/* 0x0061 */
	__u8 ecb2;			/* 0x0062 */
#define ECB3_AES 0x04
#define ECB3_DEA 0x08
	__u8 ecb3;			/* 0x0063 */
	__u32 scaol;			/* 0x0064 */
	__u8 reserved68[4];		/* 0x0068 */
	__u32 todpr;			/* 0x006c */
	__u8 reserved70[32];		/* 0x0070 */
	psw_t gpsw;			/* 0x0090 */
	__u64 gg14;			/* 0x00a0 */
	__u64 gg15;			/* 0x00a8 */
	__u8 reservedb0[20];		/* 0x00b0 */
	__u16 extcpuaddr;		/* 0x00c4 */
	__u16 eic;			/* 0x00c6 */
	__u32 reservedc8;		/* 0x00c8 */
	__u16 pgmilc;			/* 0x00cc */
	__u16 iprcc;			/* 0x00ce */
	__u32 dxc;			/* 0x00d0 */
	__u16 mcn;			/* 0x00d4 */
	__u8 perc;			/* 0x00d6 */
	__u8 peratmid;			/* 0x00d7 */
	__u64 peraddr;			/* 0x00d8 */
	__u8 eai;			/* 0x00e0 */
	__u8 peraid;			/* 0x00e1 */
	__u8 oai;			/* 0x00e2 */
	__u8 armid;			/* 0x00e3 */
	__u8 reservede4[4];		/* 0x00e4 */
	__u64 tecmc;			/* 0x00e8 */
	__u8 reservedf0[12];		/* 0x00f0 */
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
	__u32 crycbd;			/* 0x00fc */
	__u64 gcr[16];			/* 0x0100 */
	__u64 gbea;			/* 0x0180 */
	__u8 reserved188[24];		/* 0x0188 */
	__u32 fac;			/* 0x01a0 */
	__u8 reserved1a4[20];		/* 0x01a4 */
	__u64 cbrlo;			/* 0x01b8 */
	__u8 reserved1c0[8];		/* 0x01c0 */
	__u32 ecd;			/* 0x01c8 */
	__u8 reserved1cc[18];		/* 0x01cc */
	__u64 pp;			/* 0x01de */
	__u8 reserved1e6[2];		/* 0x01e6 */
	__u64 itdba;			/* 0x01e8 */
	__u64 riccbd;			/* 0x01f0 */
	__u8 reserved1f8[8];		/* 0x01f8 */
} __attribute__((packed));
struct kvm_s390_itdb {
	__u8 data[256];
} __packed;

struct sie_page {
	struct kvm_s390_sie_block sie_block;
	__u8 reserved200[1024];		/* 0x0200 */
	struct kvm_s390_itdb itdb;	/* 0x0600 */
	__u8 reserved700[2304];		/* 0x0700 */
} __packed;
struct kvm_vcpu_stat {
	u32 exit_userspace;
	u32 exit_null;
	u32 exit_external_request;
	u32 exit_external_interrupt;
	u32 exit_stop_request;
	u32 exit_validity;
	u32 exit_instruction;
	u32 halt_successful_poll;
	u32 halt_attempted_poll;
	u32 halt_poll_invalid;
	u32 halt_wakeup;
	u32 instruction_lctl;
	u32 instruction_lctlg;
	u32 instruction_stctl;
	u32 instruction_stctg;
	u32 exit_program_interruption;
	u32 exit_instr_and_program;
	u32 exit_operation_exception;
	u32 deliver_external_call;
	u32 deliver_emergency_signal;
	u32 deliver_service_signal;
	u32 deliver_virtio_interrupt;
	u32 deliver_stop_signal;
	u32 deliver_prefix_signal;
	u32 deliver_restart_signal;
	u32 deliver_program_int;
	u32 deliver_io_int;
	u32 exit_wait_state;
	u32 instruction_pfmf;
	u32 instruction_stidp;
	u32 instruction_spx;
	u32 instruction_stpx;
	u32 instruction_stap;
	u32 instruction_storage_key;
	u32 instruction_ipte_interlock;
	u32 instruction_stsch;
	u32 instruction_chsc;
	u32 instruction_stsi;
	u32 instruction_stfl;
	u32 instruction_tprot;
	u32 instruction_essa;
	u32 instruction_sthyi;
	u32 instruction_sigp_sense;
	u32 instruction_sigp_sense_running;
	u32 instruction_sigp_external_call;
	u32 instruction_sigp_emergency;
	u32 instruction_sigp_cond_emergency;
	u32 instruction_sigp_start;
	u32 instruction_sigp_stop;
	u32 instruction_sigp_stop_store_status;
	u32 instruction_sigp_store_status;
	u32 instruction_sigp_store_adtl_status;
	u32 instruction_sigp_arch;
	u32 instruction_sigp_prefix;
	u32 instruction_sigp_restart;
	u32 instruction_sigp_init_cpu_reset;
	u32 instruction_sigp_cpu_reset;
	u32 instruction_sigp_unknown;
	u32 diagnose_10;
	u32 diagnose_44;
	u32 diagnose_9c;
	u32 diagnose_258;
	u32 diagnose_308;
	u32 diagnose_500;
};
#define PGM_OPERATION 0x01
#define PGM_PRIVILEGED_OP 0x02
#define PGM_EXECUTE 0x03
#define PGM_PROTECTION 0x04
#define PGM_ADDRESSING 0x05
#define PGM_SPECIFICATION 0x06
#define PGM_DATA 0x07
#define PGM_FIXED_POINT_OVERFLOW 0x08
#define PGM_FIXED_POINT_DIVIDE 0x09
#define PGM_DECIMAL_OVERFLOW 0x0a
#define PGM_DECIMAL_DIVIDE 0x0b
#define PGM_HFP_EXPONENT_OVERFLOW 0x0c
#define PGM_HFP_EXPONENT_UNDERFLOW 0x0d
#define PGM_HFP_SIGNIFICANCE 0x0e
#define PGM_HFP_DIVIDE 0x0f
#define PGM_SEGMENT_TRANSLATION 0x10
#define PGM_PAGE_TRANSLATION 0x11
#define PGM_TRANSLATION_SPEC 0x12
#define PGM_SPECIAL_OPERATION 0x13
#define PGM_OPERAND 0x15
#define PGM_TRACE_TABEL 0x16
#define PGM_VECTOR_PROCESSING 0x1b
#define PGM_SPACE_SWITCH 0x1c
#define PGM_HFP_SQUARE_ROOT 0x1d
#define PGM_PC_TRANSLATION_SPEC 0x1f
#define PGM_AFX_TRANSLATION 0x20
#define PGM_ASX_TRANSLATION 0x21
#define PGM_LX_TRANSLATION 0x22
#define PGM_EX_TRANSLATION 0x23
#define PGM_PRIMARY_AUTHORITY 0x24
#define PGM_SECONDARY_AUTHORITY 0x25
#define PGM_LFX_TRANSLATION 0x26
#define PGM_LSX_TRANSLATION 0x27
#define PGM_ALET_SPECIFICATION 0x28
#define PGM_ALEN_TRANSLATION 0x29
#define PGM_ALE_SEQUENCE 0x2a
#define PGM_ASTE_VALIDITY 0x2b
#define PGM_ASTE_SEQUENCE 0x2c
#define PGM_EXTENDED_AUTHORITY 0x2d
#define PGM_LSTE_SEQUENCE 0x2e
#define PGM_ASTE_INSTANCE 0x2f
#define PGM_STACK_FULL 0x30
#define PGM_STACK_EMPTY 0x31
#define PGM_STACK_SPECIFICATION 0x32
#define PGM_STACK_TYPE 0x33
#define PGM_STACK_OPERATION 0x34
#define PGM_ASCE_TYPE 0x38
#define PGM_REGION_FIRST_TRANS 0x39
#define PGM_REGION_SECOND_TRANS 0x3a
#define PGM_REGION_THIRD_TRANS 0x3b
#define PGM_MONITOR 0x40
#define PGM_PER 0x80
#define PGM_CRYPTO_OPERATION 0x119
/* irq types in order of priority */
enum irq_types {
	IRQ_PEND_MCHK_EX = 0,
	IRQ_PEND_SVC,
	IRQ_PEND_PROG,
	IRQ_PEND_MCHK_REP,
	IRQ_PEND_EXT_IRQ_KEY,
	IRQ_PEND_EXT_MALFUNC,
	IRQ_PEND_EXT_EMERGENCY,
	IRQ_PEND_EXT_EXTERNAL,
	IRQ_PEND_EXT_CLOCK_COMP,
	IRQ_PEND_EXT_CPU_TIMER,
	IRQ_PEND_EXT_TIMING,
	IRQ_PEND_EXT_SERVICE,
	IRQ_PEND_EXT_HOST,
	IRQ_PEND_PFAULT_INIT,
	IRQ_PEND_PFAULT_DONE,
	IRQ_PEND_VIRTIO,
	IRQ_PEND_IO_ISC_0,
	IRQ_PEND_IO_ISC_1,
	IRQ_PEND_IO_ISC_2,
	IRQ_PEND_IO_ISC_3,
	IRQ_PEND_IO_ISC_4,
	IRQ_PEND_IO_ISC_5,
	IRQ_PEND_IO_ISC_6,
	IRQ_PEND_IO_ISC_7,
	IRQ_PEND_SIGP_STOP,
	IRQ_PEND_RESTART,
	IRQ_PEND_SET_PREFIX,
	IRQ_PEND_COUNT
};

/*
 * We have 2M for virtio device descriptor pages. Smallest amount of
 * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
 */
#define KVM_S390_MAX_VIRTIO_IRQS 87381
/*
 * Repressible (non-floating) machine check interrupts
 * subclass bits in MCIC
 */
#define MCHK_EXTD_BIT 58
#define MCHK_DEGR_BIT 56
#define MCHK_WARN_BIT 55
#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
		       (1UL << MCHK_EXTD_BIT) | \
		       (1UL << MCHK_WARN_BIT))

/* Exigent machine check interrupts subclass bits in MCIC */
#define MCHK_SD_BIT 63
#define MCHK_PD_BIT 62
#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))
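
/*
 * Illustrative sketch only (the helpers below are hypothetical and not part
 * of this header): a machine check interruption code (mcic) is classified by
 * testing it against the subclass masks defined above, e.g.
 *
 *	static inline bool example_mchk_is_exigent(__u64 mcic)
 *	{
 *		return (mcic & MCHK_EX_MASK) != 0;
 *	}
 *
 *	static inline bool example_mchk_is_repressible(__u64 mcic)
 *	{
 *		return (mcic & MCHK_REP_MASK) != 0;
 *	}
 */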
#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY)    | \
			   (1UL << IRQ_PEND_EXT_CLOCK_COMP)  | \
			   (1UL << IRQ_PEND_EXT_CPU_TIMER)   | \
			   (1UL << IRQ_PEND_EXT_MALFUNC)     | \
			   (1UL << IRQ_PEND_EXT_EMERGENCY)   | \
			   (1UL << IRQ_PEND_EXT_EXTERNAL)    | \
			   (1UL << IRQ_PEND_EXT_TIMING)      | \
			   (1UL << IRQ_PEND_EXT_HOST)        | \
			   (1UL << IRQ_PEND_EXT_SERVICE)     | \
			   (1UL << IRQ_PEND_VIRTIO)          | \
			   (1UL << IRQ_PEND_PFAULT_INIT)     | \
			   (1UL << IRQ_PEND_PFAULT_DONE))

#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
			  (1UL << IRQ_PEND_IO_ISC_1) | \
			  (1UL << IRQ_PEND_IO_ISC_2) | \
			  (1UL << IRQ_PEND_IO_ISC_3) | \
			  (1UL << IRQ_PEND_IO_ISC_4) | \
			  (1UL << IRQ_PEND_IO_ISC_5) | \
			  (1UL << IRQ_PEND_IO_ISC_6) | \
			  (1UL << IRQ_PEND_IO_ISC_7))

#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
			    (1UL << IRQ_PEND_MCHK_EX))
struct kvm_s390_interrupt_info {
	struct list_head list;
	u64 type;
	union {
		struct kvm_s390_io_info io;
		struct kvm_s390_ext_info ext;
		struct kvm_s390_pgm_info pgm;
		struct kvm_s390_emerg_info emerg;
		struct kvm_s390_extcall_info extcall;
		struct kvm_s390_prefix_info prefix;
		struct kvm_s390_stop_info stop;
		struct kvm_s390_mchk_info mchk;
	};
};

struct kvm_s390_irq_payload {
	struct kvm_s390_io_info io;
	struct kvm_s390_ext_info ext;
	struct kvm_s390_pgm_info pgm;
	struct kvm_s390_emerg_info emerg;
	struct kvm_s390_extcall_info extcall;
	struct kvm_s390_prefix_info prefix;
	struct kvm_s390_stop_info stop;
	struct kvm_s390_mchk_info mchk;
};

struct kvm_s390_local_interrupt {
	spinlock_t lock;
	struct kvm_s390_float_interrupt *float_int;
	struct swait_queue_head *wq;
	atomic_t *cpuflags;
	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
	struct kvm_s390_irq_payload irq;
	unsigned long pending_irqs;
};

#define FIRQ_LIST_IO_ISC_0 0
#define FIRQ_LIST_IO_ISC_1 1
#define FIRQ_LIST_IO_ISC_2 2
#define FIRQ_LIST_IO_ISC_3 3
#define FIRQ_LIST_IO_ISC_4 4
#define FIRQ_LIST_IO_ISC_5 5
#define FIRQ_LIST_IO_ISC_6 6
#define FIRQ_LIST_IO_ISC_7 7
#define FIRQ_LIST_PFAULT   8
#define FIRQ_LIST_VIRTIO   9
#define FIRQ_LIST_COUNT    10

#define FIRQ_CNTR_IO       0
#define FIRQ_CNTR_SERVICE  1
#define FIRQ_CNTR_VIRTIO   2
#define FIRQ_CNTR_PFAULT   3
#define FIRQ_MAX_COUNT     4

struct kvm_s390_float_interrupt {
	unsigned long pending_irqs;
	spinlock_t lock;
	struct list_head lists[FIRQ_LIST_COUNT];
	int counters[FIRQ_MAX_COUNT];
	struct kvm_s390_mchk_info mchk;
	struct kvm_s390_ext_info srv_signal;
	int next_rr_cpu;
	unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
};
struct kvm_hw_wp_info_arch {
	unsigned long addr;
	unsigned long phys_addr;
	int len;
	char *old_data;
};

struct kvm_hw_bp_info_arch {
	unsigned long addr;
	int len;
};
/*
 * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
 * Further KVM_GUESTDBG flags which can be used from userspace can be found in
 * arch/s390/include/uapi/asm/kvm.h
 */
#define KVM_GUESTDBG_EXIT_PENDING 0x10000000

#define guestdbg_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
#define guestdbg_sstep_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
#define guestdbg_hw_bp_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
		(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
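
/*
 * Illustrative sketch only (not part of this header): userspace typically
 * sets the flags tested by the guestdbg_*() macros above through the
 * KVM_SET_GUEST_DEBUG vcpu ioctl, assuming <linux/kvm.h> is included and
 * vcpu_fd is an open vcpu file descriptor:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * The resulting control word ends up in vcpu->guest_debug, which is what
 * the macros above inspect.
 */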
struct kvm_guestdbg_info_arch {
	unsigned long cr0;
	unsigned long cr9;
	unsigned long cr10;
	unsigned long cr11;
	struct kvm_hw_bp_info_arch *hw_bp_info;
	struct kvm_hw_wp_info_arch *hw_wp_info;
	int nr_hw_bp;
	int nr_hw_wp;
	unsigned long last_bp;
};
struct kvm_vcpu_arch {
	struct kvm_s390_sie_block *sie_block;
	unsigned int host_acrs[NUM_ACRS];
	struct fpu host_fpregs;
	struct kvm_s390_local_interrupt local_int;
	struct hrtimer ckc_timer;
	struct kvm_s390_pgm_info pgm;
	struct gmap *gmap;
	struct kvm_guestdbg_info_arch guestdbg;
	unsigned long pfault_token;
	unsigned long pfault_select;
	unsigned long pfault_compare;
	bool cputm_enabled;
	/*
	 * The seqcount protects updates to cputm_start and sie_block.cputm;
	 * this way we can have non-blocking reads with consistent values.
	 * Only the owning VCPU thread (vcpu->cpu) is allowed to change these
	 * values and to start/stop/enable/disable cpu timer accounting.
	 */
	seqcount_t cputm_seqcount;
	__u64 cputm_start;
};
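
/*
 * Illustrative sketch only (the helper below is hypothetical and not part of
 * this header): a non-blocking reader of the cpu timer state guarded by
 * cputm_seqcount follows the usual seqcount retry pattern from
 * <linux/seqlock.h>:
 *
 *	static inline __u64 example_read_cputm_start(struct kvm_vcpu *vcpu)
 *	{
 *		unsigned int seq;
 *		__u64 value;
 *
 *		do {
 *			seq = read_seqcount_begin(&vcpu->arch.cputm_seqcount);
 *			value = vcpu->arch.cputm_start;
 *		} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq));
 *		return value;
 *	}
 *
 * Only the owning VCPU thread takes the write side (write_seqcount_begin()/
 * write_seqcount_end()) around updates, as the comment in the struct notes.
 */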
struct kvm_vm_stat {
	u32 remote_tlb_flush;
};

struct kvm_arch_memory_slot {
};

struct s390_map_info {
	struct list_head list;
	__u64 guest_addr;
	__u64 addr;
	struct page *page;
};

struct s390_io_adapter {
	unsigned int id;
	int isc;
	bool maskable;
	bool masked;
	bool swap;
	struct rw_semaphore maps_lock;
	struct list_head maps;
	atomic_t nr_maps;
};

#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
#define MAX_S390_ADAPTER_MAPS 256

/* maximum size of facilities and facility mask is 2k bytes */
#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
#define S390_ARCH_FAC_LIST_SIZE_U64 \
	(S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
#define S390_ARCH_FAC_MASK_SIZE_U64 \
	(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
struct kvm_s390_cpu_model {
	/* facility mask supported by kvm & hosting machine */
	__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
	/* facility list requested by guest (in dma page) */
	__u64 *fac_list;
	u64 cpuid;
	unsigned short ibc;
};

struct kvm_s390_crypto {
	struct kvm_s390_crypto_cb *crycb;
	__u32 crycbd;
	__u8 aes_kw;
	__u8 dea_kw;
};

struct kvm_s390_crypto_cb {
	__u8 reserved00[72];			/* 0x0000 */
	__u8 dea_wrapping_key_mask[24];		/* 0x0048 */
	__u8 aes_wrapping_key_mask[32];		/* 0x0060 */
	__u8 reserved80[128];			/* 0x0080 */
};

/*
 * sie_page2 has to be allocated as DMA because fac_list and crycb need
 * 31bit addresses in the sie control block.
 */
struct sie_page2 {
	__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64];	/* 0x0000 */
	struct kvm_s390_crypto_cb crycb;		/* 0x0800 */
	u8 reserved900[0x1000 - 0x900];			/* 0x0900 */
} __packed;
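
/*
 * Illustrative sketch only: the fields of sie_page2 are referenced from the
 * SIE control block through 31-bit origins (fac and crycbd above are __u32),
 * which is why the page must come from a DMA-capable allocation. Setup code
 * is expected to look roughly like the following (a sketch, not the actual
 * setup path):
 *
 *	struct sie_page2 *p2 =
 *		(void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *
 *	kvm->arch.sie_page2 = p2;
 *	kvm->arch.model.fac_list = p2->fac_list;
 *	kvm->arch.crypto.crycb = &p2->crycb;
 *	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb |
 *				  CRYCB_FORMAT1; (or CRYCB_FORMAT2)
 */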
struct kvm_arch {
	void *sca;
	int use_esca;
	rwlock_t sca_lock;
	debug_info_t *dbf;
	struct kvm_s390_float_interrupt float_int;
	struct kvm_device *flic;
	struct gmap *gmap;
	unsigned long mem_limit;
	int css_support;
	int use_irqchip;
	int use_cmma;
	int user_cpu_state_ctrl;
	int user_sigp;
	int user_stsi;
	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
	wait_queue_head_t ipte_wq;
	int ipte_lock_count;
	struct mutex ipte_mutex;
	struct ratelimit_state sthyi_limit;
	spinlock_t start_stop_lock;
	struct sie_page2 *sie_page2;
	struct kvm_s390_cpu_model model;
	struct kvm_s390_crypto crypto;
	u64 epoch;
};

#define KVM_HVA_ERR_BAD		(-1UL)
#define KVM_HVA_ERR_RO_BAD	(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

#define ASYNC_PF_PER_VCPU 64
struct kvm_arch_async_pf {
	unsigned long pfault_token;
};

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);

extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_check_processor_compat(void *rtn) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
		struct kvm_memory_slot *slot) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu);

#endif