vsie.c

/*
 * kvm nested virtualization support for s390x
 *
 * Copyright IBM Corp. 2016
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <asm/gmap.h>
#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include "kvm-s390.h"
#include "gaccess.h"

struct vsie_page {
	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
	/* the pinned original scb */
	struct kvm_s390_sie_block *scb_o;	/* 0x0200 */
	/* the shadow gmap in use by the vsie_page */
	struct gmap *gmap;			/* 0x0208 */
	/* address of the last reported fault to guest2 */
	unsigned long fault_addr;		/* 0x0210 */
	__u8 reserved[0x0700 - 0x0218];		/* 0x0218 */
	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
} __packed;
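
/*
 * Note on the layout above: the shadow scb sits at offset 0, so the address
 * returned by page_to_virt() for a vsie page can be used directly as the scb
 * passed to SIE. The offsets in the comments are byte offsets within the
 * page, and the whole structure must fill exactly one 4 KiB page (see the
 * BUILD_BUG_ON() in kvm_s390_handle_vsie()).
 */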

/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
			     __u16 reason_code)
{
	scb->ipa = 0x1000;
	scb->ipb = ((__u32) reason_code) << 16;
	scb->icptcode = ICPT_VALIDITY;
	return 1;
}

/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
	prefix_unmapped(vsie_page);
	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		cpu_relax();
}
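
/*
 * The STOP interrupt request set above forces a VCPU that is currently
 * executing this shadow scb to exit SIE, so the busy-wait on PROG_IN_SIE is
 * guaranteed to terminate. PROG_REQUEST stays set until prefix_mapped()
 * allows the VSIE to run again.
 */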

/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}

/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
	int cpuflags;

	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}

/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

	/* we don't allow ESA/390 guests */
	if (!(cpuflags & CPUSTAT_ZARCH))
		return set_validity_icpt(scb_s, 0x0001U);

	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
		return set_validity_icpt(scb_s, 0x0001U);
	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
		return set_validity_icpt(scb_s, 0x0007U);

	/* intervention requests will be set later */
	newflags = CPUSTAT_ZARCH;
	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
		newflags |= CPUSTAT_GED;
	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
		if (cpuflags & CPUSTAT_GED)
			return set_validity_icpt(scb_s, 0x0001U);
		newflags |= CPUSTAT_GED2;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
		newflags |= cpuflags & CPUSTAT_P;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
		newflags |= cpuflags & CPUSTAT_SM;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
		newflags |= cpuflags & CPUSTAT_IBS;

	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
}
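
/*
 * The intervention request bits (CPUSTAT_STOP_INT/IO_INT/EXT_INT) are
 * deliberately not copied here; they are refreshed from the original scb by
 * update_intervention_requests() right before every VSIE entry.
 */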

/*
 * Create a shadow copy of the crycb block and set up key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *
 * We only accept format-1 (no AP in g2), but convert it into format-2.
 * There is nothing to do for format-0.
 *
 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	u32 crycb_addr = scb_o->crycbd & 0x7ffffff8U;
	unsigned long *b1, *b2;
	u8 ecb3_flags;

	scb_s->crycbd = 0;
	if (!(scb_o->crycbd & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
		return 0;
	/* format-1 is supported with message-security-assist extension 3 */
	if (!test_kvm_facility(vcpu->kvm, 76))
		return 0;
	/* we may only allow it if enabled for guest 2 */
	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
		     (ECB3_AES | ECB3_DEA);
	if (!ecb3_flags)
		return 0;

	if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK))
		return set_validity_icpt(scb_s, 0x003CU);
	else if (!crycb_addr)
		return set_validity_icpt(scb_s, 0x0039U);

	/* copy only the wrapping keys */
	if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
		return set_validity_icpt(scb_s, 0x0035U);

	scb_s->ecb3 |= ecb3_flags;
	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 |
			CRYCB_FORMAT2;

	/* xor both blocks in one run */
	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
	b2 = (unsigned long *)
			vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
	return 0;
}
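
/*
 * Note: the shadow crycb and the shadowed facility list (see handle_stfle())
 * both live inside the vsie page, which is allocated with GFP_DMA in
 * get_vsie_page() and therefore lies below 2 GB; this is what allows storing
 * their addresses in the 32-bit crycbd and fac fields. The xor above folds
 * guest 2's own wrapping key masks into the masks it defined for guest 3,
 * since the hardware only ever sees this single shadow crycb.
 */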

/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;

	scb_s->ibc = 0;
	/* ibc installed in g2 and requested for g3 */
	if (vcpu->kvm->arch.model.ibc && (scb_o->ibc & 0x0fffU)) {
		scb_s->ibc = scb_o->ibc & 0x0fffU;
		/* take care of the minimum ibc level of the machine */
		if (scb_s->ibc < min_ibc)
			scb_s->ibc = min_ibc;
		/* take care of the maximum ibc level set for the guest */
		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
			scb_s->ibc = vcpu->kvm->arch.model.ibc;
	}
}

/* unshadow the scb, copying parameters back to the real scb */
static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;

	/* interception */
	scb_o->icptcode = scb_s->icptcode;
	scb_o->icptstatus = scb_s->icptstatus;
	scb_o->ipa = scb_s->ipa;
	scb_o->ipb = scb_s->ipb;
	scb_o->gbea = scb_s->gbea;

	/* timer */
	scb_o->cputm = scb_s->cputm;
	scb_o->ckc = scb_s->ckc;
	scb_o->todpr = scb_s->todpr;

	/* guest state */
	scb_o->gpsw = scb_s->gpsw;
	scb_o->gg14 = scb_s->gg14;
	scb_o->gg15 = scb_s->gg15;
	memcpy(scb_o->gcr, scb_s->gcr, 128);
	scb_o->pp = scb_s->pp;

	/* interrupt intercept */
	switch (scb_s->icptcode) {
	case ICPT_PROGI:
	case ICPT_INSTPROGI:
	case ICPT_EXTINT:
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
		break;
	case ICPT_PARTEXEC:
		/* MVPG only */
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
		break;
	}

	if (scb_s->ihcpu != 0xffffU)
		scb_o->ihcpu = scb_s->ihcpu;
}
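
/*
 * The memcpy()s above copy back the per-interception parameters of the sie
 * block (bytes 0xc0 - 0xef, respectively only 0xc0 - 0xcf for the MVPG
 * partial-execution case), so guest 2 gets the full interception data it
 * needs to handle the intercept itself.
 */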

/*
 * Set up the shadow scb by copying and checking the relevant parts of the g2
 * provided scb.
 *
 * Returns: - 0 if the scb has been shadowed
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	bool had_tx = scb_s->ecb & 0x10U;
	unsigned long new_mso = 0;
	int rc;

	/* make sure we don't have any leftovers when reusing the scb */
	scb_s->icptcode = 0;
	scb_s->eca = 0;
	scb_s->ecb = 0;
	scb_s->ecb2 = 0;
	scb_s->ecb3 = 0;
	scb_s->ecd = 0;
	scb_s->fac = 0;

	rc = prepare_cpuflags(vcpu, vsie_page);
	if (rc)
		goto out;

	/* timer */
	scb_s->cputm = scb_o->cputm;
	scb_s->ckc = scb_o->ckc;
	scb_s->todpr = scb_o->todpr;
	scb_s->epoch = scb_o->epoch;

	/* guest state */
	scb_s->gpsw = scb_o->gpsw;
	scb_s->gg14 = scb_o->gg14;
	scb_s->gg15 = scb_o->gg15;
	memcpy(scb_s->gcr, scb_o->gcr, 128);
	scb_s->pp = scb_o->pp;

	/* interception / execution handling */
	scb_s->gbea = scb_o->gbea;
	scb_s->lctl = scb_o->lctl;
	scb_s->svcc = scb_o->svcc;
	scb_s->ictl = scb_o->ictl;
	/*
	 * SKEY handling functions can't deal with false setting of PTE invalid
	 * bits. Therefore we cannot provide interpretation and would later
	 * have to provide our own emulation handlers.
	 */
	scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	scb_s->icpua = scb_o->icpua;

	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
		new_mso = scb_o->mso & 0xfffffffffff00000UL;
	/* if the hva of the prefix changes, we have to remap the prefix */
	if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix)
		prefix_unmapped(vsie_page);
	/* SIE will do mso/msl validity and exception checks for us */
	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
	scb_s->mso = new_mso;
	scb_s->prefix = scb_o->prefix;

	/* We definitely have to flush the tlb if this scb never ran */
	if (scb_s->ihcpu != 0xffffU)
		scb_s->ihcpu = scb_o->ihcpu;

	/* MVPG and Protection Exception Interpretation are always available */
	scb_s->eca |= scb_o->eca & 0x01002000U;
	/* Host-protection-interruption introduced with ESOP */
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
		scb_s->ecb |= scb_o->ecb & 0x02U;
	/* transactional execution */
	if (test_kvm_facility(vcpu->kvm, 73)) {
		/* remap the prefix if tx is toggled on */
		if ((scb_o->ecb & 0x10U) && !had_tx)
			prefix_unmapped(vsie_page);
		scb_s->ecb |= scb_o->ecb & 0x10U;
	}
	/* SIMD */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		scb_s->eca |= scb_o->eca & 0x00020000U;
		scb_s->ecd |= scb_o->ecd & 0x20000000U;
	}
	/* Run-time-Instrumentation */
	if (test_kvm_facility(vcpu->kvm, 64))
		scb_s->ecb3 |= scb_o->ecb3 & 0x01U;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
		scb_s->eca |= scb_o->eca & 0x00000001U;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
		scb_s->eca |= scb_o->eca & 0x40000000U;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & 0x80000000U;

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
out:
	if (rc)
		unshadow_scb(vcpu, vsie_page);
	return rc;
}

void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct vsie_page *cur;
	unsigned long prefix;
	struct page *page;
	int i;

	if (!gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;

	/*
	 * Only new shadow blocks are added to the list during runtime,
	 * therefore we can safely reference them all the time.
	 */
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = READ_ONCE(kvm->arch.vsie.pages[i]);
		if (!page)
			continue;
		cur = page_to_virt(page);
		if (READ_ONCE(cur->gmap) != gmap)
			continue;
		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
		/* with mso/msl, the prefix lies at an offset */
		prefix += cur->scb_s.mso;
		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
			prefix_unmapped_sync(cur);
	}
}

/*
 * Map the first prefix page and if tx is enabled also the second prefix page.
 *
 * The prefix will be protected, a gmap notifier will inform about unmaps.
 * The shadow scb must not be executed until the prefix is remapped, this is
 * guaranteed by properly handling PROG_REQUEST.
 *
 * Returns: - 0 if successfully mapped or already mapped
 *          - > 0 if control has to be given to guest 2
 *          - -EAGAIN if the caller can retry immediately
 *          - -ENOMEM if out of memory
 */
static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
	int rc;

	if (prefix_is_mapped(vsie_page))
		return 0;

	/* mark it as mapped so we can catch any concurrent unmappers */
	prefix_mapped(vsie_page);

	/* with mso/msl, the prefix lies at offset *mso* */
	prefix += scb_s->mso;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
	if (!rc && (scb_s->ecb & 0x10U))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE);
	/*
	 * We don't have to mprotect, we will be called for all unshadows.
	 * SIE will detect if protection applies and trigger a validity.
	 */
	if (rc)
		prefix_unmapped(vsie_page);
	if (rc > 0 || rc == -EFAULT)
		rc = set_validity_icpt(scb_s, 0x0037U);
	return rc;
}

/*
 * Pin the guest page given by gpa and set hpa to the pinned host address.
 * Will always be pinned writable.
 *
 * Returns: - 0 on success
 *          - -EINVAL if the gpa is not valid guest storage
 *          - -ENOMEM if out of memory
 */
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
	struct page *page;
	hva_t hva;
	int rc;

	hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
	if (kvm_is_error_hva(hva))
		return -EINVAL;
	rc = get_user_pages_fast(hva, 1, 1, &page);
	if (rc < 0)
		return rc;
	else if (rc != 1)
		return -ENOMEM;
	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
	return 0;
}
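
/*
 * Note: the "hpa" returned above is the host kernel address of the pinned
 * page (page_to_virt()) plus the offset within the page, i.e. an address
 * that can be stored directly into the shadow scb fields. The page is always
 * pinned writable (get_user_pages_fast(..., 1, ...)), which is why unpinning
 * marks it dirty unconditionally.
 */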

/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
	struct page *page;

	page = virt_to_page(hpa);
	set_page_dirty_lock(page);
	put_page(page);
	/* always mark the page as dirty for migration */
	mark_page_dirty(kvm, gpa_to_gfn(gpa));
}

/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;

	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
	if (hpa) {
		gpa = scb_o->scaol & ~0xfUL;
		if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
			gpa |= (u64) scb_o->scaoh << 32;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->scaol = 0;
		scb_s->scaoh = 0;
	}

	hpa = scb_s->itdba;
	if (hpa) {
		gpa = scb_o->itdba & ~0xffUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->itdba = 0;
	}

	hpa = scb_s->gvrd;
	if (hpa) {
		gpa = scb_o->gvrd & ~0x1ffUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->gvrd = 0;
	}

	hpa = scb_s->riccbd;
	if (hpa) {
		gpa = scb_o->riccbd & ~0x3fUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->riccbd = 0;
	}
}

/*
 * Instead of shadowing some blocks, we can simply forward them because the
 * addresses in the scb are 64 bit long.
 *
 * This works as long as the data lies in one page. If blocks ever exceed one
 * page, we have to fall back to shadowing.
 *
 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 *
 * Returns: - 0 if all blocks were pinned.
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;
	int rc = 0;

	gpa = scb_o->scaol & ~0xfUL;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
		gpa |= (u64) scb_o->scaoh << 32;
	if (gpa) {
		if (!(gpa & ~0x1fffUL))
			rc = set_validity_icpt(scb_s, 0x0038U);
		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
			rc = set_validity_icpt(scb_s, 0x0011U);
		else if ((gpa & PAGE_MASK) !=
			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
			rc = set_validity_icpt(scb_s, 0x003bU);
		if (!rc) {
			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
			if (rc == -EINVAL)
				rc = set_validity_icpt(scb_s, 0x0034U);
		}
		if (rc)
			goto unpin;
		scb_s->scaoh = (u32)((u64)hpa >> 32);
		scb_s->scaol = (u32)(u64)hpa;
	}

	gpa = scb_o->itdba & ~0xffUL;
	if (gpa && (scb_s->ecb & 0x10U)) {
		if (!(gpa & ~0x1fffU)) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		/* 256 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc == -EINVAL)
			rc = set_validity_icpt(scb_s, 0x0080U);
		if (rc)
			goto unpin;
		scb_s->itdba = hpa;
	}

	gpa = scb_o->gvrd & ~0x1ffUL;
	if (gpa && (scb_s->eca & 0x00020000U) &&
	    !(scb_s->ecd & 0x20000000U)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		/*
		 * 512 bytes of vector registers cannot cross page boundaries;
		 * if this block gets bigger, we have to shadow it.
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc == -EINVAL)
			rc = set_validity_icpt(scb_s, 0x1310U);
		if (rc)
			goto unpin;
		scb_s->gvrd = hpa;
	}

	gpa = scb_o->riccbd & ~0x3fUL;
	if (gpa && (scb_s->ecb3 & 0x01U)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* 64 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc == -EINVAL)
			rc = set_validity_icpt(scb_s, 0x0043U);
		/* Validity 0x0044 will be checked by SIE */
		if (rc)
			goto unpin;
		scb_s->riccbd = hpa;
	}
	return 0;
unpin:
	unpin_blocks(vcpu, vsie_page);
	return rc;
}

/* unpin the scb provided by guest 2, marking it as dirty */
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		      gpa_t gpa)
{
	hpa_t hpa = (hpa_t) vsie_page->scb_o;

	if (hpa)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
	vsie_page->scb_o = NULL;
}

/*
 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 *
 * Returns: - 0 if the scb was pinned.
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		   gpa_t gpa)
{
	hpa_t hpa;
	int rc;

	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc == -EINVAL) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		if (!rc)
			rc = 1;
	}
	if (!rc)
		vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
	return rc;
}

/*
 * Inject a fault into guest 2.
 *
 * Returns: - > 0 if control has to be given to guest 2
 *            < 0 if an error occurred during injection.
 */
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
			bool write_flag)
{
	struct kvm_s390_pgm_info pgm = {
		.code = code,
		.trans_exc_code =
			/* 0-51: virtual address */
			(vaddr & 0xfffffffffffff000UL) |
			/* 52-53: store / fetch */
			(((unsigned int) !write_flag) + 1) << 10,
			/* 62-63: asce id (always primary == 0) */
		.exc_access_id = 0, /* always primary */
		.op_access_id = 0, /* not MVPG */
	};
	int rc;

	if (code == PGM_PROTECTION)
		pgm.trans_exc_code |= 0x4UL;

	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
	return rc ? rc : 1;
}
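
/*
 * The translation-exception identification built above encodes the faulting
 * address in bits 0-51 and the access type in bits 52-53: write_flag yields
 * the value 1 (store), !write_flag the value 2 (fetch), shifted into place
 * by << 10. For protection exceptions, bit 61 (mask 0x4) is set in addition.
 */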

/*
 * Handle a fault during vsie execution on a gmap shadow.
 *
 * Returns: - 0 if the fault was resolved
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	int rc;

	if (current->thread.gmap_int_code == PGM_PROTECTION)
		/* we can directly forward all protection exceptions */
		return inject_fault(vcpu, PGM_PROTECTION,
				    current->thread.gmap_addr, 1);

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				   current->thread.gmap_addr);
	if (rc > 0) {
		rc = inject_fault(vcpu, rc,
				  current->thread.gmap_addr,
				  current->thread.gmap_write_flag);
		if (rc >= 0)
			vsie_page->fault_addr = current->thread.gmap_addr;
	}
	return rc;
}

/*
 * Retry the previous fault that required guest 2 intervention. This avoids
 * one superfluous SIE re-entry and direct exit.
 *
 * Will ignore any errors. The next SIE fault will do proper fault handling.
 */
static void handle_last_fault(struct kvm_vcpu *vcpu,
			      struct vsie_page *vsie_page)
{
	if (vsie_page->fault_addr)
		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				      vsie_page->fault_addr);
	vsie_page->fault_addr = 0;
}

static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
{
	vsie_page->scb_s.icptcode = 0;
}

/* rewind the psw and clear the vsie icpt, so we can retry execution */
static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int ilen = insn_length(scb_s->ipa >> 8);

	/* take care of EXECUTE instructions */
	if (scb_s->icptstatus & 1) {
		ilen = (scb_s->icptstatus >> 4) & 0x6;
		if (!ilen)
			ilen = 4;
	}
	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
	clear_vsie_icpt(vsie_page);
}
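
/*
 * If icptstatus & 1 is set, the intercepted instruction was the target of an
 * EXECUTE-type instruction; in that case the length to rewind by is taken
 * from icptstatus (2, 4 or 6, with 0 treated as 4) instead of being derived
 * from the intercepted opcode in ipa.
 */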

/*
 * Try to shadow + enable the guest 2 provided facility list.
 * Retry instruction execution if enabled for and provided by guest 2.
 *
 * Returns: - 0 if handled (retry or guest 2 icpt)
 *          - > 0 if control has to be given to guest 2
 */
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	__u32 fac = vsie_page->scb_o->fac & 0x7ffffff8U;

	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
		retry_vsie_icpt(vsie_page);
		if (read_guest_real(vcpu, fac, &vsie_page->fac,
				    sizeof(vsie_page->fac)))
			return set_validity_icpt(scb_s, 0x1090U);
		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
	}
	return 0;
}
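
/*
 * Rather than intercepting every STFLE, the facility list guest 2 prepared
 * for guest 3 is copied into the vsie page and its address is put into the
 * shadow scb. The PSW was rewound by retry_vsie_icpt(), so on the next VSIE
 * entry the hardware interprets STFLE itself using the shadowed list.
 */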

/*
 * Run the vsie on a shadow scb and a shadow gmap, without any further
 * sanity checks, handling SIE faults.
 *
 * Returns: - 0 everything went fine
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int rc;

	handle_last_fault(vcpu, vsie_page);

	if (need_resched())
		schedule();
	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	local_irq_disable();
	guest_enter_irqoff();
	local_irq_enable();

	rc = sie64a(scb_s, vcpu->run->s.regs.gprs);

	local_irq_disable();
	guest_exit_irqoff();
	local_irq_enable();
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (rc > 0)
		rc = 0; /* we could still have an icpt */
	else if (rc == -EFAULT)
		return handle_fault(vcpu, vsie_page);

	switch (scb_s->icptcode) {
	case ICPT_INST:
		if (scb_s->ipa == 0xb2b0)
			rc = handle_stfle(vcpu, vsie_page);
		break;
	case ICPT_STOP:
		/* stop not requested by g2 - must have been a kick */
		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
			clear_vsie_icpt(vsie_page);
		break;
	case ICPT_VALIDITY:
		if ((scb_s->ipa & 0xf000) != 0xf000)
			scb_s->ipa += 0x1000;
		break;
	}
	return rc;
}
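
/*
 * Notes on do_vsie_run(): as in the regular SIE loop, the srcu read lock
 * protecting the memslots is dropped while the hardware runs the shadow scb.
 * After the exit, an ICPT_INST for opcode 0xb2b0 (STFLE) is handled locally,
 * and an ICPT_STOP that guest 2 did not request itself (i.e. a kick via
 * kvm_s390_vsie_kick()) is cleared so it is not forwarded.
 */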

static void release_gmap_shadow(struct vsie_page *vsie_page)
{
	if (vsie_page->gmap)
		gmap_put(vsie_page->gmap);
	WRITE_ONCE(vsie_page->gmap, NULL);
	prefix_unmapped(vsie_page);
}

static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
			       struct vsie_page *vsie_page)
{
	unsigned long asce;
	union ctlreg0 cr0;
	struct gmap *gmap;
	int edat;

	asce = vcpu->arch.sie_block->gcr[1];
	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat += edat && test_kvm_facility(vcpu->kvm, 78);

	/*
	 * ASCE or EDAT could have changed since last icpt, or the gmap
	 * we're holding has been unshadowed. If the gmap is still valid,
	 * we can safely reuse it.
	 */
	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
		return 0;

	/* release the old shadow - if any, and mark the prefix as unmapped */
	release_gmap_shadow(vsie_page);
	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
	if (IS_ERR(gmap))
		return PTR_ERR(gmap);
	gmap->private = vcpu->kvm;
	WRITE_ONCE(vsie_page->gmap, gmap);
	return 0;
}
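
/*
 * The "edat" value passed to gmap_shadow() encodes the DAT enhancements
 * available to guest 3: 0 if EDAT is disabled in CR0 or facility 8 is not
 * available to guest 2, 1 for EDAT-1 (facility 8), and 2 if facility 78
 * (EDAT-2) is available as well. The asce used for the shadow is guest 2's
 * CR1, i.e. the address space under which guest 3's memory is mapped.
 */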

/*
 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
 */
static void register_shadow_scb(struct kvm_vcpu *vcpu,
				struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;

	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
	/*
	 * External calls have to lead to a kick of the vcpu and
	 * therefore the vsie -> Simulate Wait state.
	 */
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	/*
	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
	 */
	preempt_disable();
	scb_s->epoch += vcpu->kvm->arch.epoch;
	preempt_enable();
}

/*
 * Unregister a shadow scb from a VCPU.
 */
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}

/*
 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
 * prefix pages and faults.
 *
 * Returns: - 0 if no errors occurred
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int rc = 0;

	while (1) {
		rc = acquire_gmap_shadow(vcpu, vsie_page);
		if (!rc)
			rc = map_prefix(vcpu, vsie_page);
		if (!rc) {
			gmap_enable(vsie_page->gmap);
			update_intervention_requests(vsie_page);
			rc = do_vsie_run(vcpu, vsie_page);
			gmap_enable(vcpu->arch.gmap);
		}
		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);

		if (rc == -EAGAIN)
			rc = 0;
		if (rc || scb_s->icptcode || signal_pending(current) ||
		    kvm_s390_vcpu_has_irq(vcpu, 0))
			break;
	};

	if (rc == -EFAULT) {
		/*
		 * Addressing exceptions are always presented as intercepts.
		 * As addressing exceptions are suppressing and our guest 3 PSW
		 * points at the responsible instruction, we have to
		 * forward the PSW and set the ilc. If we can't read the guest 3
		 * instruction, we can use an arbitrary ilc. Let's always use
		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
		 * memory. (we could also fake the shadow so the hardware
		 * handles it).
		 */
		scb_s->icptcode = ICPT_PROGI;
		scb_s->iprcc = PGM_ADDRESSING;
		scb_s->pgmilc = 4;
		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
	}
	return rc;
}

/*
 * Get or create a vsie page for a scb address.
 *
 * Returns: - address of a vsie page (cached or new one)
 *          - NULL if the same scb address is already used by another VCPU
 *          - ERR_PTR(-ENOMEM) if out of memory
 */
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int nr_vcpus;

	rcu_read_lock();
	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
	rcu_read_unlock();
	if (page) {
		if (page_ref_inc_return(page) == 2)
			return page_to_virt(page);
		page_ref_dec(page);
	}

	/*
	 * We want at least #online_vcpus shadows, so every VCPU can execute
	 * the VSIE in parallel.
	 */
	nr_vcpus = atomic_read(&kvm->online_vcpus);

	mutex_lock(&kvm->arch.vsie.mutex);
	if (kvm->arch.vsie.page_count < nr_vcpus) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
		if (!page) {
			mutex_unlock(&kvm->arch.vsie.mutex);
			return ERR_PTR(-ENOMEM);
		}
		page_ref_inc(page);
		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
		kvm->arch.vsie.page_count++;
	} else {
		/* reuse an existing entry that belongs to nobody */
		while (true) {
			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
			if (page_ref_inc_return(page) == 2)
				break;
			page_ref_dec(page);
			kvm->arch.vsie.next++;
			kvm->arch.vsie.next %= nr_vcpus;
		}
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
	}
	page->index = addr;
	/* double use of the same address */
	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
		page_ref_dec(page);
		mutex_unlock(&kvm->arch.vsie.mutex);
		return NULL;
	}
	mutex_unlock(&kvm->arch.vsie.mutex);

	vsie_page = page_to_virt(page);
	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
	release_gmap_shadow(vsie_page);
	vsie_page->fault_addr = 0;
	vsie_page->scb_s.ihcpu = 0xffffU;
	return vsie_page;
}
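
/*
 * Two details of the cache above: a vsie page that is not in use by any VCPU
 * has a page reference count of 1 (held by the pages[] array), so
 * page_ref_inc_return() == 2 means we acquired exclusive use of it. The radix
 * tree is keyed by addr >> 9 because scb addresses are 512 byte aligned, and
 * at most #online_vcpus vsie pages are ever created; unused entries are
 * recycled round-robin via vsie.next.
 */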

/* put a vsie page acquired via get_vsie_page */
static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
{
	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);

	page_ref_dec(page);
}

int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
	struct vsie_page *vsie_page;
	unsigned long scb_addr;
	int rc;

	vcpu->stat.instruction_sie++;
	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	BUILD_BUG_ON(sizeof(struct vsie_page) != 4096);
	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);

	/* 512 byte alignment */
	if (unlikely(scb_addr & 0x1ffUL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;

	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
	if (IS_ERR(vsie_page))
		return PTR_ERR(vsie_page);
	else if (!vsie_page)
		/* double use of sie control block - simply do nothing */
		return 0;

	rc = pin_scb(vcpu, vsie_page, scb_addr);
	if (rc)
		goto out_put;
	rc = shadow_scb(vcpu, vsie_page);
	if (rc)
		goto out_unpin_scb;
	rc = pin_blocks(vcpu, vsie_page);
	if (rc)
		goto out_unshadow;
	register_shadow_scb(vcpu, vsie_page);
	rc = vsie_run(vcpu, vsie_page);
	unregister_shadow_scb(vcpu);
	unpin_blocks(vcpu, vsie_page);
out_unshadow:
	unshadow_scb(vcpu, vsie_page);
out_unpin_scb:
	unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
	put_vsie_page(vcpu->kvm, vsie_page);

	return rc < 0 ? rc : 0;
}

/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
	mutex_init(&kvm->arch.vsie.mutex);
	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
}

/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int i;

	mutex_lock(&kvm->arch.vsie.mutex);
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = kvm->arch.vsie.pages[i];
		kvm->arch.vsie.pages[i] = NULL;
		vsie_page = page_to_virt(page);
		release_gmap_shadow(vsie_page);
		/* free the radix tree entry */
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
		__free_page(page);
	}
	kvm->arch.vsie.page_count = 0;
	mutex_unlock(&kvm->arch.vsie.mutex);
}

void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);

	/*
	 * Even if the VCPU lets go of the shadow sie block reference, it is
	 * still valid in the cache. So we can safely kick it.
	 */
	if (scb) {
		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
		if (scb->prog0c & PROG_IN_SIE)
			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
	}
}
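
/*
 * The kick works in two steps: PROG_BLOCK_SIE prevents the shadow scb from
 * (re-)entering SIE until vsie_run() clears it again after the current
 * iteration, and CPUSTAT_STOP_INT forces an exit if the VCPU is currently
 * running the shadow scb. An ICPT_STOP caused only by this kick is cleared
 * again in do_vsie_run().
 */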