/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 */

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long : 2;
		unsigned long g : 1; /* Subspace Group Control */
		unsigned long p : 1; /* Private Space Control */
		unsigned long s : 1; /* Storage-Alteration-Event Control */
		unsigned long x : 1; /* Space-Switch-Event Control */
		unsigned long r : 1; /* Real-Space Control */
		unsigned long : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};

enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};
union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long : 2;
		unsigned long p : 1; /* DAT-Protection Bit */
		unsigned long : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i : 1; /* Region-Invalid Bit */
		unsigned long : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};
union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long : 2;
		unsigned long p : 1; /* DAT-Protection Bit */
		unsigned long : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i : 1; /* Region-Invalid Bit */
		unsigned long : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};
struct region3_table_entry_fc0 {
	unsigned long sto : 52; /* Segment-Table Origin */
	unsigned long : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p : 1; /* DAT-Protection Bit */
	unsigned long : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};
struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long : 14;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long : 2;
	unsigned long i : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long : 2;
};
union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long : 4;
		unsigned long i : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long : 2;
	};
};
struct segment_entry_fc0 {
	unsigned long pto : 53; /* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p : 1; /* DAT-Protection Bit */
	unsigned long : 3;
	unsigned long i : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long : 2;
};
struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long : 3;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long : 2;
	unsigned long i : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long : 2;
};
union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long : 4;
		unsigned long i : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long : 2;
	};
};
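/*
 * With the format control (fc) set, an entry maps a whole large frame
 * instead of pointing to a lower-level table: rfaa is 33 bits wide,
 * leaving a 64 - 33 = 31 bit byte offset (2 GB region frames, EDAT2),
 * and sfaa is 44 bits wide, leaving a 20 bit offset (1 MB segment
 * frames, EDAT1). guest_translate() below relies on exactly this in its
 * rtte.fc / ste.fc shortcuts.
 */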
enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};
union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z : 1; /* Zero Bit */
		unsigned long i : 1; /* Page-Invalid Bit */
		unsigned long p : 1; /* DAT-Protection Bit */
		unsigned long co : 1; /* Change-Recording Override */
		unsigned long : 8;
	};
};
/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx : 11;
		unsigned long px : 8;
		unsigned long bx : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long : 9;
		unsigned long rsx01 : 2;
		unsigned long : 9;
		unsigned long rtx01 : 2;
		unsigned long : 9;
		unsigned long sx01 : 2;
	};
};
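/*
 * Illustrative sketch (not part of the original file): the index widths
 * above sum to 11 + 11 + 11 + 11 + 8 + 12 = 64 bits, so a guest virtual
 * address such as 0x0000000080001000 decodes to rtx = 1 and px = 1 with
 * all other indexes zero.
 */
static inline unsigned long vaddr_px_example(unsigned long gva)
{
	union vaddress vaddr = {.addr = gva};

	/* the page index selects one of 256 page-table entries */
	return vaddr.px;
}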
/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members allow the frame
 * address of a region, segment or page table entry to be assigned directly.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};
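/*
 * Because guest_translate() initializes a union raddress with the complete
 * guest virtual address and then overwrites only rfaa, sfaa or pfra, the
 * low-order offset bits of the original address are preserved, which
 * directly yields the final absolute address for 2 GB, 1 MB and 4 KB
 * frames respectively.
 */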
struct ale {
	unsigned long i : 1; /* ALEN-Invalid Bit */
	unsigned long : 5;
	unsigned long fo : 1; /* Fetch-Only Bit */
	unsigned long p : 1; /* Private Bit */
	unsigned long alesn : 8; /* Access-List-Entry Sequence Number */
	unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
	unsigned long : 32;
	unsigned long : 1;
	unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
};
struct aste {
	unsigned long i : 1; /* ASX-Invalid Bit */
	unsigned long ato : 29; /* Authority-Table Origin */
	unsigned long : 1;
	unsigned long b : 1; /* Base-Space Bit */
	unsigned long ax : 16; /* Authorization Index */
	unsigned long atl : 12; /* Authority-Table Length */
	unsigned long : 2;
	unsigned long ca : 1; /* Controlled-ASN Bit */
	unsigned long ra : 1; /* Reusable-ASN Bit */
	unsigned long asce : 64; /* Address-Space-Control Element */
	unsigned long ald : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
};
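/*
 * The IPTE "lock" below comes in two flavours: when the SIE control block
 * has the eca & 1 capability set, the lock state is kept in the shared
 * ipte_control word of the SCA (see ipte_lock_siif/ipte_unlock_siif and
 * the kh check in ipte_lock_held); otherwise a plain counter protected by
 * kvm->arch.ipte_mutex is used (ipte_lock_simple/ipte_unlock_simple).
 */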
int ipte_lock_held(struct kvm_vcpu *vcpu)
	if (vcpu->arch.sie_block->eca & 1) {
		read_lock(&vcpu->kvm->arch.sca_lock);
		rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
		read_unlock(&vcpu->kvm->arch.sca_lock);
	return vcpu->kvm->arch.ipte_lock_count != 0;

static void ipte_lock_simple(struct kvm_vcpu *vcpu)
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count++;
	if (vcpu->kvm->arch.ipte_lock_count > 1)
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
		old = READ_ONCE(*ic);
			read_unlock(&vcpu->kvm->arch.sca_lock);
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);

static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count--;
	if (vcpu->kvm->arch.ipte_lock_count)
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
		old = READ_ONCE(*ic);
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	wake_up(&vcpu->kvm->arch.ipte_wq);
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);

static void ipte_lock_siif(struct kvm_vcpu *vcpu)
	union ipte_control old, new, *ic;

	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
		old = READ_ONCE(*ic);
			read_unlock(&vcpu->kvm->arch.sca_lock);
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);

static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
	union ipte_control old, new, *ic;

	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
		old = READ_ONCE(*ic);
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);

	wake_up(&vcpu->kvm->arch.ipte_wq);
void ipte_lock(struct kvm_vcpu *vcpu)
	if (vcpu->arch.sie_block->eca & 1)
		ipte_lock_siif(vcpu);
	else
		ipte_lock_simple(vcpu);

void ipte_unlock(struct kvm_vcpu *vcpu)
	if (vcpu->arch.sie_block->eca & 1)
		ipte_unlock_siif(vcpu);
	else
		ipte_unlock_simple(vcpu);
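/*
 * Typical usage (illustrative sketch): guest_translate_address() further
 * down does not take the IPTE lock itself, so a caller that needs a
 * stable translation brackets it explicitly:
 *
 *	ipte_lock(vcpu);
 *	rc = guest_translate_address(vcpu, gva, ar, &gpa, GACC_STORE);
 *	ipte_unlock(vcpu);
 */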
static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
	unsigned long ald_addr, authority_table_addr;

	save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];

		return PGM_ALET_SPECIFICATION;

		ald_addr = vcpu->arch.sie_block->gcr[5];
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,

		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));

		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
	if (ale.aleax != eax) {
		if (eax / 16 > aste.atl)
			return PGM_EXTENDED_AUTHORITY;

		authority_table_addr = aste.ato * 4 + eax / 4;

		rc = read_guest_real(vcpu, authority_table_addr,

		if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
			return PGM_EXTENDED_AUTHORITY;

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
	unsigned long : 6;
	unsigned long b60 : 1;
	unsigned long b61 : 1;
	unsigned long as : 2; /* ASCE Identifier */
};

enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2  /* Exception was due to fetch operation */
};
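/*
 * Worked example (illustrative): a store that faults at gva 0x2000 while
 * the PSW is in primary-space mode is reported with
 * addr = 0x2000 >> PAGE_SHIFT = 2, fsi = FSI_STORE and as = PSW_AS_PRIMARY;
 * trans_exc() below fills the translation-exception code exactly this way.
 */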
static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
		     ar_t ar, enum gacc_mode mode, enum prot_type prot)
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec;

	memset(pgm, 0, sizeof(*pgm));

	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;

	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller. We can always store
		 * exc_access_id, as it is undefined for non-AR cases.
		 */
		tec->addr = gva >> PAGE_SHIFT;
		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		pgm->exc_access_id = ar;

		tec->addr = gva >> PAGE_SHIFT;
		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		/* exc_access_id is undefined for most cases */
		pgm->exc_access_id = ar;
	default: /* LA and KEYC set b61 to 0, other params undefined */
static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 unsigned long ga, ar_t ar, enum gacc_mode mode)
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec_bits;

	memset(pgm, 0, sizeof(*pgm));
	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	tec_bits->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
	tec_bits->as = psw.as;

	if (mode == GACC_IFETCH)
		psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY;

		asce->val = vcpu->arch.sie_block->gcr[1];
	case PSW_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		asce->val = vcpu->arch.sie_block->gcr[13];
		rc = ar_translation(vcpu, asce, ar, mode);
		case PGM_ALEN_TRANSLATION:
		case PGM_ALE_SEQUENCE:
		case PGM_ASTE_VALIDITY:
		case PGM_ASTE_SEQUENCE:
		case PGM_EXTENDED_AUTHORITY:
			vcpu->arch.pgm.exc_access_id = ar;
			tec_bits->addr = ga >> PAGE_SHIFT;

static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	union ctlreg0 ctlreg0;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);

	ptr = asce.origin * 4096;
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
	case ASCE_TYPE_REGION2:
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		dat_protection |= rfte.p;
		ptr = rfte.rto * 4096 + vaddr.rsx * 8;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		dat_protection |= rste.p;
		ptr = rste.rto * 4096 + vaddr.rtx * 8;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * 2048 + vaddr.px * 8;
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return PGM_PAGE_TRANSLATION;
		return PGM_TRANSLATION_SPEC;
	if (pte.co && !edat1)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	raddr.pfra = pte.pfra;
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);

	if (mode == GACC_STORE && dat_protection)
		return PGM_PROTECTION;
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
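/*
 * Sketch of how callers consume the return convention documented above
 * (illustrative; guest_page_range() below handles the same cases):
 *
 *	rc = guest_translate(vcpu, ga, &gpa, asce, mode);
 *	if (rc > 0)	-> access exception, rc is the PGM_* code to report
 *	if (rc < 0)	-> host failure, e.g. -EFAULT from kvm_read_guest()
 *	if (rc == 0)	-> gpa holds the guest absolute address
 */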
static inline int is_low_address(unsigned long ga)
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
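/*
 * The single mask test above works because ~0x11fful clears exactly bits
 * 0-8 and bit 12: e.g. 0x1100 & ~0x11fful == 0 (inside the 4096..4607
 * window), while 0x0200 & ~0x11fful != 0 (512 is not a low address).
 */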
static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
					  const union asce asce)
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (psw_bits(*psw).t && asce.p)
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
			    unsigned long *pages, unsigned long nr_pages,
			    const union asce asce, enum gacc_mode mode)
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec_bits;

	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	lap_enabled = low_address_protection_enabled(vcpu, asce);
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		tec_bits->addr = ga >> PAGE_SHIFT;
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga)) {
			pgm->code = PGM_PROTECTION;
		if (psw_bits(*psw).t) {
			rc = guest_translate(vcpu, ga, pages, asce, mode);
			if (rc == PGM_PROTECTION)
			*pages = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, *pages))
				pgm->code = PGM_ADDRESSING;
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
		 unsigned long len, enum gacc_mode mode)
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long _len, nr_pages, gpa, idx;
	unsigned long pages_array[2];
	unsigned long *pages;

	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);

	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
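	/*
	 * Example: a 4-byte access that starts at page offset 0xffe crosses
	 * a page boundary, so nr_pages = ((0xffe + 4 - 1) >> PAGE_SHIFT) + 1
	 * = 2 and two guest pages have to be resolved.
	 */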
	if (nr_pages > ARRAY_SIZE(pages_array))
		pages = vmalloc(nr_pages * sizeof(unsigned long));

	need_ipte_lock = psw_bits(*psw).t && !asce.r;

	rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, mode);
	for (idx = 0; idx < nr_pages && !rc; idx++) {
		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (mode == GACC_STORE)
			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
	if (nr_pages > ARRAY_SIZE(pages_array))
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode)
	unsigned long _len, gpa;

		gpa = kvm_s390_real_to_abs(vcpu, gra);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
			rc = write_guest_abs(vcpu, gpa, data, _len);
			rc = read_guest_abs(vcpu, gpa, data, _len);
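/*
 * Note that the real-address path above bypasses DAT entirely: each page
 * of the range is only put through prefixing via kvm_s390_real_to_abs()
 * before the absolute-address accessors are used.
 */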
/**
 * guest_translate_address - translate guest logical into guest absolute address
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
			    unsigned long *gpa, enum gacc_mode mode)
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	tec->addr = gva >> PAGE_SHIFT;

	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
		if (mode == GACC_STORE) {
			rc = pgm->code = PGM_PROTECTION;

	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
		rc = guest_translate(vcpu, gva, gpa, asce, mode);
		if (rc == PGM_PROTECTION)
		*gpa = kvm_s390_real_to_abs(vcpu, gva);
		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
			rc = pgm->code = PGM_ADDRESSING;
/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
		    unsigned long length, enum gacc_mode mode)
	unsigned long currlen;

	while (length > 0 && !rc) {
		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @gra: Guest real address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	if (!ctlreg0.lap || !is_low_address(gra))
	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
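/*
 * Typical caller (illustrative sketch; kvm_s390_inject_prog_irq() and
 * write_guest_real() are declared elsewhere in kvm-s390 and are assumed
 * here): an instruction handler that stores to a guest real address first
 * performs the low-address check and injects the resulting program
 * interruption on failure.
 *
 *	rc = kvm_s390_check_low_addr_prot_real(vcpu, gra);
 *	if (rc)
 *		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
 *	rc = write_guest_real(vcpu, gra, &data, sizeof(data));
 */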