vgic-v2-sr.c

/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
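
/*
 * Overview (added summary, derived from the code below): this file saves
 * and restores the GICv2 virtual CPU interface (GICH_*) state around
 * world switches and, on arm64, emulates guest GICV accesses that trap
 * to the hypervisor. Everything here runs at EL2, hence the __hyp_text
 * annotations and the kern_hyp_va() translations applied to kernel
 * pointers before they are dereferenced.
 */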

static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
					    void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
	u32 eisr0, eisr1;
	int i;
	bool expect_mi;

	expect_mi = !!(cpu_if->vgic_hcr & GICH_HCR_UIE);

	/*
	 * A maintenance interrupt can only be pending if underflow
	 * notification was enabled, or if one of the in-use LRs holds a
	 * purely virtual interrupt with EOI notification requested.
	 */
	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		expect_mi |= (!(cpu_if->vgic_lr[i] & GICH_LR_HW) &&
			      (cpu_if->vgic_lr[i] & GICH_LR_EOI));
	}

	/* Only read MISR/EISR when a maintenance interrupt is possible. */
	if (expect_mi) {
		cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);

		if (cpu_if->vgic_misr & GICH_MISR_EOI) {
			eisr0 = readl_relaxed(base + GICH_EISR0);
			if (unlikely(nr_lr > 32))
				eisr1 = readl_relaxed(base + GICH_EISR1);
			else
				eisr1 = 0;
		} else {
			eisr0 = eisr1 = 0;
		}
	} else {
		cpu_if->vgic_misr = 0;
		eisr0 = eisr1 = 0;
	}

#ifdef CONFIG_CPU_BIG_ENDIAN
	cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
#else
	cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
#endif
}

static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
	u32 elrsr0, elrsr1;

	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(nr_lr > 32))
		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
	else
		elrsr1 = 0;

#ifdef CONFIG_CPU_BIG_ENDIAN
	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
#else
	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
#endif
}

static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
	int i;

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		/*
		 * An LR flagged as empty in ELRSR need not be read back;
		 * just clear the pending/active state in the saved copy.
		 */
		if (cpu_if->vgic_elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);

	if (!base)
		return;

	cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);

	if (vcpu->arch.vgic_cpu.live_lrs) {
		cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);

		save_maint_int_state(vcpu, base);
		save_elrsr(vcpu, base);
		save_lrs(vcpu, base);

		/* Disable the virtual interface until the next restore. */
		writel_relaxed(0, base + GICH_HCR);

		vcpu->arch.vgic_cpu.live_lrs = 0;
	} else {
		/* Nothing was live: report every LR as empty. */
		cpu_if->vgic_eisr = 0;
		cpu_if->vgic_elrsr = ~0UL;
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_apr = 0;
	}
}

/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
	int i;
	u64 live_lrs = 0;

	if (!base)
		return;

	/* Only program LRs that actually carry pending/active state. */
	for (i = 0; i < nr_lr; i++)
		if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
			live_lrs |= 1UL << i;

	if (live_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
		for (i = 0; i < nr_lr; i++) {
			if (!(live_lrs & (1UL << i)))
				continue;

			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}

	writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
}

#ifdef CONFIG_ARM64
/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *				     guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t fault_ipa;
	void __iomem *addr;
	int rd;

	/* Build the full address */
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	/* If not for GICV, move on */
	if (fault_ipa < vgic->vgic_cpu_base ||
	    fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
		return 0;

	/* Reject anything but a 32bit access */
	if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
		return -1;

	/* Not aligned? Don't bother */
	if (fault_ipa & 3)
		return -1;

	rd = kvm_vcpu_dabt_get_rd(vcpu);
	addr = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
	addr += fault_ipa - vgic->vgic_cpu_base;

	if (kvm_vcpu_dabt_iswrite(vcpu)) {
		u32 data = vcpu_data_guest_to_host(vcpu,
						   vcpu_get_reg(vcpu, rd),
						   sizeof(u32));
		writel_relaxed(data, addr);
	} else {
		u32 data = readl_relaxed(addr);
		vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data,
							       sizeof(u32)));
	}

	return 1;
}
#endif
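
/*
 * For illustration only (not part of the original file): a minimal sketch
 * of how a data-abort fixup path might consume the three return values of
 * __vgic_v2_perform_cpuif_access(). The function name, signature and
 * exit-code handling below are assumptions; the real dispatch lives in the
 * architecture's hyp switch code and differs in detail.
 *
 *	static bool __hyp_text handle_gicv_fault(struct kvm_vcpu *vcpu,
 *						 u64 *exit_code)
 *	{
 *		int ret = __vgic_v2_perform_cpuif_access(vcpu);
 *
 *		if (ret == 1)
 *			return true;	// emulated: skip the faulting
 *					// instruction and re-enter the guest
 *
 *		if (ret == -1)
 *			*exit_code = ARM_EXCEPTION_TRAP;	// illegal access:
 *								// hand the trap to the host
 *
 *		return false;	// ret == 0: not GICV, take the normal MMIO path
 *	}
 */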