vgic-v3-sr.c 7.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328
  1. /*
  2. * Copyright (C) 2012-2015 - ARM Ltd
  3. * Author: Marc Zyngier <marc.zyngier@arm.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #include <linux/compiler.h>
  18. #include <linux/irqchip/arm-gic-v3.h>
  19. #include <linux/kvm_host.h>
  20. #include <asm/kvm_hyp.h>
/* ICH_VTR_EL2.ListRegs (low 4 bits here): index of the last implemented LR. */
#define vtr_to_max_lr_idx(v) ((v) & 0xf)
/* ICH_VTR_EL2.PRIbits (bits [31:29]): encoded as (priority bits - 1). */
#define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1)
/*
 * Read back List Register @lr (index masked to 0-15).
 *
 * The ICH_LR<n>_EL2 registers are individually named system registers,
 * so the index must be encoded into the access itself; a switch over
 * all sixteen possible LRs is the only way to select one at run time.
 */
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	/* Not reachable: the mask above restricts lr to the 16 cases. */
	unreachable();
}
/*
 * Write @val to List Register @lr (index masked to 0-15).
 *
 * Mirror of __gic_v3_get_lr(): each ICH_LR<n>_EL2 is a distinct named
 * system register, hence the explicit case per register.
 */
static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}
  114. static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
  115. {
  116. struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
  117. int i;
  118. bool expect_mi;
  119. expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);
  120. for (i = 0; i < nr_lr; i++) {
  121. if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
  122. continue;
  123. expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
  124. (cpu_if->vgic_lr[i] & ICH_LR_EOI));
  125. }
  126. if (expect_mi) {
  127. cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);
  128. if (cpu_if->vgic_misr & ICH_MISR_EOI)
  129. cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
  130. else
  131. cpu_if->vgic_eisr = 0;
  132. } else {
  133. cpu_if->vgic_misr = 0;
  134. cpu_if->vgic_eisr = 0;
  135. }
  136. }
/*
 * Save the guest's GICv3 CPU interface state (VMCR, list registers,
 * active-priority registers, maintenance status) into the vcpu's
 * shadow copy, and return the CPU interface to the host configuration.
 */
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre)
		dsb(st);

	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);

	if (vcpu->arch.vgic_cpu.live_lrs) {
		int i;
		u32 max_lr_idx, nr_pri_bits;

		/* Snapshot the empty-LR status before touching anything else. */
		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		/* Disable the vCPU interface; no further maintenance interrupts. */
		write_gicreg(0, ICH_HCR_EL2);

		val = read_gicreg(ICH_VTR_EL2);
		max_lr_idx = vtr_to_max_lr_idx(val);
		nr_pri_bits = vtr_to_nr_pri_bits(val);

		save_maint_int_state(vcpu, max_lr_idx + 1);

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
				continue;

			/*
			 * An LR flagged empty in ELRSR has no state worth
			 * reading back: clear the state bits in the cached
			 * copy instead of doing a sysreg read.
			 */
			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

		/* Fallthrough intended: more PRIbits imply the lower APRs too. */
		switch (nr_pri_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
			cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
			/* Fall through */
		default:
			cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
		}

		switch (nr_pri_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
			cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
			/* Fall through */
		default:
			cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
		}

		vcpu->arch.vgic_cpu.live_lrs = 0;
	} else {
		/* Nothing was live: report an entirely empty interface. */
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_eisr = 0;
		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

	/* Re-enable host (EL2) system register access to the GIC. */
	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}
/*
 * Load the vcpu's shadow GICv3 CPU interface state back into the
 * hardware before entering the guest, restoring only list registers
 * that actually carry pending/active state.
 */
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;
	u32 max_lr_idx, nr_pri_bits;
	u16 live_lrs = 0;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
	}

	val = read_gicreg(ICH_VTR_EL2);
	max_lr_idx = vtr_to_max_lr_idx(val);
	nr_pri_bits = vtr_to_nr_pri_bits(val);

	/* Only LRs with pending/active state need reprogramming. */
	for (i = 0; i <= max_lr_idx; i++) {
		if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
			live_lrs |= (1 << i);
	}

	write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

	if (live_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		/* Fallthrough intended: more PRIbits imply the lower APRs too. */
		switch (nr_pri_bits) {
		case 7:
			write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
			write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
			/* Fall through */
		case 6:
			write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
			/* Fall through */
		default:
			write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
		}

		switch (nr_pri_bits) {
		case 7:
			write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
			write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
			/* Fall through */
		case 6:
			write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
			/* Fall through */
		default:
			write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
		}

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(live_lrs & (1 << i)))
				continue;

			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
		}
	}

	/*
	 * Ensures that the above will have reached the
	 * (re)distributors. This ensure the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}
	vcpu->arch.vgic_cpu.live_lrs = live_lrs;

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}
  276. void __hyp_text __vgic_v3_init_lrs(void)
  277. {
  278. int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
  279. int i;
  280. for (i = 0; i <= max_lr_idx; i++)
  281. __gic_v3_set_lr(0, i);
  282. }
  283. u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
  284. {
  285. return read_gicreg(ICH_VTR_EL2);
  286. }