arm64: KVM: fix unmapping with 48-bit VAs
arch/arm/kvm/mmu.c
1 /*
2  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License, version 2, as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
17  */
18
19 #include <linux/mman.h>
20 #include <linux/kvm_host.h>
21 #include <linux/io.h>
22 #include <linux/hugetlb.h>
23 #include <trace/events/kvm.h>
24 #include <asm/pgalloc.h>
25 #include <asm/cacheflush.h>
26 #include <asm/kvm_arm.h>
27 #include <asm/kvm_mmu.h>
28 #include <asm/kvm_mmio.h>
29 #include <asm/kvm_asm.h>
30 #include <asm/kvm_emulate.h>
31
32 #include "trace.h"
33
34 extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[];
35
36 static pgd_t *boot_hyp_pgd;
37 static pgd_t *hyp_pgd;
38 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
39
40 static void *init_bounce_page;
41 static unsigned long hyp_idmap_start;
42 static unsigned long hyp_idmap_end;
43 static phys_addr_t hyp_idmap_vector;
44
45 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
46
47 #define kvm_pmd_huge(_x)        (pmd_huge(_x) || pmd_trans_huge(_x))
48
49 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
50 {
51         /*
52          * This function also gets called when dealing with HYP page
53          * tables. As HYP doesn't have an associated struct kvm (and
54          * the HYP page tables are fairly static), we don't do
55          * anything there.
56          */
57         if (kvm)
58                 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
59 }
60
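/*
 * Pre-fill @cache with up to @max pages so that page table allocations
 * done later under the mmu_lock cannot fail.
 */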
61 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
62                                   int min, int max)
63 {
64         void *page;
65
66         BUG_ON(max > KVM_NR_MEM_OBJS);
67         if (cache->nobjs >= min)
68                 return 0;
69         while (cache->nobjs < max) {
70                 page = (void *)__get_free_page(PGALLOC_GFP);
71                 if (!page)
72                         return -ENOMEM;
73                 cache->objects[cache->nobjs++] = page;
74         }
75         return 0;
76 }
77
78 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
79 {
80         while (mc->nobjs)
81                 free_page((unsigned long)mc->objects[--mc->nobjs]);
82 }
83
84 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
85 {
86         void *p;
87
88         BUG_ON(!mc || !mc->nobjs);
89         p = mc->objects[--mc->nobjs];
90         return p;
91 }
92
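/*
 * Clear a pgd entry whose pud table has become empty: unlink the table,
 * flush the TLB for this address and drop the reference on the pgd page.
 * clear_pud_entry() and clear_pmd_entry() below do the same one level down.
 */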
93 static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
94 {
95         pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
96         pgd_clear(pgd);
97         kvm_tlb_flush_vmid_ipa(kvm, addr);
98         pud_free(NULL, pud_table);
99         put_page(virt_to_page(pgd));
100 }
101
102 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
103 {
104         pmd_t *pmd_table = pmd_offset(pud, 0);
105         VM_BUG_ON(pud_huge(*pud));
106         pud_clear(pud);
107         kvm_tlb_flush_vmid_ipa(kvm, addr);
108         pmd_free(NULL, pmd_table);
109         put_page(virt_to_page(pud));
110 }
111
112 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
113 {
114         pte_t *pte_table = pte_offset_kernel(pmd, 0);
115         VM_BUG_ON(kvm_pmd_huge(*pmd));
116         pmd_clear(pmd);
117         kvm_tlb_flush_vmid_ipa(kvm, addr);
118         pte_free_kernel(NULL, pte_table);
119         put_page(virt_to_page(pmd));
120 }
121
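/*
 * Clear every valid pte in [addr, end), flushing the TLB and dropping the
 * reference taken when each entry was installed. If the pte table ends up
 * empty, the pmd entry pointing to it is cleared and the table is freed.
 */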
122 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
123                        phys_addr_t addr, phys_addr_t end)
124 {
125         phys_addr_t start_addr = addr;
126         pte_t *pte, *start_pte;
127
128         start_pte = pte = pte_offset_kernel(pmd, addr);
129         do {
130                 if (!pte_none(*pte)) {
131                         kvm_set_pte(pte, __pte(0));
132                         put_page(virt_to_page(pte));
133                         kvm_tlb_flush_vmid_ipa(kvm, addr);
134                 }
135         } while (pte++, addr += PAGE_SIZE, addr != end);
136
137         if (kvm_pte_table_empty(kvm, start_pte))
138                 clear_pmd_entry(kvm, pmd, start_addr);
139 }
140
141 static void unmap_pmds(struct kvm *kvm, pud_t *pud,
142                        phys_addr_t addr, phys_addr_t end)
143 {
144         phys_addr_t next, start_addr = addr;
145         pmd_t *pmd, *start_pmd;
146
147         start_pmd = pmd = pmd_offset(pud, addr);
148         do {
149                 next = kvm_pmd_addr_end(addr, end);
150                 if (!pmd_none(*pmd)) {
151                         if (kvm_pmd_huge(*pmd)) {
152                                 pmd_clear(pmd);
153                                 kvm_tlb_flush_vmid_ipa(kvm, addr);
154                                 put_page(virt_to_page(pmd));
155                         } else {
156                                 unmap_ptes(kvm, pmd, addr, next);
157                         }
158                 }
159         } while (pmd++, addr = next, addr != end);
160
161         if (kvm_pmd_table_empty(kvm, start_pmd))
162                 clear_pud_entry(kvm, pud, start_addr);
163 }
164
165 static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
166                        phys_addr_t addr, phys_addr_t end)
167 {
168         phys_addr_t next, start_addr = addr;
169         pud_t *pud, *start_pud;
170
171         start_pud = pud = pud_offset(pgd, addr);
172         do {
173                 next = kvm_pud_addr_end(addr, end);
174                 if (!pud_none(*pud)) {
175                         if (pud_huge(*pud)) {
176                                 pud_clear(pud);
177                                 kvm_tlb_flush_vmid_ipa(kvm, addr);
178                                 put_page(virt_to_page(pud));
179                         } else {
180                                 unmap_pmds(kvm, pud, addr, next);
181                         }
182                 }
183         } while (pud++, addr = next, addr != end);
184
185         if (kvm_pud_table_empty(kvm, start_pud))
186                 clear_pgd_entry(kvm, pgd, start_addr);
187 }
188
189
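/*
 * Walk the page tables rooted at @pgdp and unmap [start, start + size),
 * freeing any tables that become empty on the way. Used both for the HYP
 * page tables (kvm == NULL) and for the guest stage-2 tables.
 */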
190 static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
191                         phys_addr_t start, u64 size)
192 {
193         pgd_t *pgd;
194         phys_addr_t addr = start, end = start + size;
195         phys_addr_t next;
196
197         pgd = pgdp + pgd_index(addr);
198         do {
199                 next = kvm_pgd_addr_end(addr, end);
200                 if (!pgd_none(*pgd))
201                         unmap_puds(kvm, pgd, addr, next);
202         } while (pgd++, addr = next, addr != end);
203 }
204
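/*
 * Clean the data cache to the PoC for every page mapped by the ptes in
 * [addr, end), using the userspace alias (hva) of each guest page.
 */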
205 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
206                               phys_addr_t addr, phys_addr_t end)
207 {
208         pte_t *pte;
209
210         pte = pte_offset_kernel(pmd, addr);
211         do {
212                 if (!pte_none(*pte)) {
213                         hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
214                         kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
215                 }
216         } while (pte++, addr += PAGE_SIZE, addr != end);
217 }
218
219 static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
220                               phys_addr_t addr, phys_addr_t end)
221 {
222         pmd_t *pmd;
223         phys_addr_t next;
224
225         pmd = pmd_offset(pud, addr);
226         do {
227                 next = kvm_pmd_addr_end(addr, end);
228                 if (!pmd_none(*pmd)) {
229                         if (kvm_pmd_huge(*pmd)) {
230                                 hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
231                                 kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
232                         } else {
233                                 stage2_flush_ptes(kvm, pmd, addr, next);
234                         }
235                 }
236         } while (pmd++, addr = next, addr != end);
237 }
238
239 static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
240                               phys_addr_t addr, phys_addr_t end)
241 {
242         pud_t *pud;
243         phys_addr_t next;
244
245         pud = pud_offset(pgd, addr);
246         do {
247                 next = kvm_pud_addr_end(addr, end);
248                 if (!pud_none(*pud)) {
249                         if (pud_huge(*pud)) {
250                                 hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
251                                 kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
252                         } else {
253                                 stage2_flush_pmds(kvm, pud, addr, next);
254                         }
255                 }
256         } while (pud++, addr = next, addr != end);
257 }
258
259 static void stage2_flush_memslot(struct kvm *kvm,
260                                  struct kvm_memory_slot *memslot)
261 {
262         phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
263         phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
264         phys_addr_t next;
265         pgd_t *pgd;
266
267         pgd = kvm->arch.pgd + pgd_index(addr);
268         do {
269                 next = kvm_pgd_addr_end(addr, end);
270                 stage2_flush_puds(kvm, pgd, addr, next);
271         } while (pgd++, addr = next, addr != end);
272 }
273
274 /**
275  * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
276  * @kvm: The struct kvm pointer
277  *
278  * Go through the stage 2 page tables and invalidate any cache lines
279  * backing memory already mapped to the VM.
280  */
281 void stage2_flush_vm(struct kvm *kvm)
282 {
283         struct kvm_memslots *slots;
284         struct kvm_memory_slot *memslot;
285         int idx;
286
287         idx = srcu_read_lock(&kvm->srcu);
288         spin_lock(&kvm->mmu_lock);
289
290         slots = kvm_memslots(kvm);
291         kvm_for_each_memslot(memslot, slots)
292                 stage2_flush_memslot(kvm, memslot);
293
294         spin_unlock(&kvm->mmu_lock);
295         srcu_read_unlock(&kvm->srcu, idx);
296 }
297
298 /**
299  * free_boot_hyp_pgd - free HYP boot page tables
300  *
301  * Free the HYP boot page tables. The bounce page is also freed.
302  */
303 void free_boot_hyp_pgd(void)
304 {
305         mutex_lock(&kvm_hyp_pgd_mutex);
306
307         if (boot_hyp_pgd) {
308                 unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
309                 unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
310                 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
311                 boot_hyp_pgd = NULL;
312         }
313
314         if (hyp_pgd)
315                 unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
316
317         free_page((unsigned long)init_bounce_page);
318         init_bounce_page = NULL;
319
320         mutex_unlock(&kvm_hyp_pgd_mutex);
321 }
322
323 /**
324  * free_hyp_pgds - free Hyp-mode page tables
325  *
326  * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
327  * therefore contains either mappings in the kernel memory area (above
328  * PAGE_OFFSET), or device mappings in the vmalloc range (from
329  * VMALLOC_START to VMALLOC_END).
330  *
331  * boot_hyp_pgd should only map two pages for the init code.
332  */
333 void free_hyp_pgds(void)
334 {
335         unsigned long addr;
336
337         free_boot_hyp_pgd();
338
339         mutex_lock(&kvm_hyp_pgd_mutex);
340
341         if (hyp_pgd) {
342                 for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
343                         unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
344                 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
345                         unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
346
347                 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
348                 hyp_pgd = NULL;
349         }
350
351         mutex_unlock(&kvm_hyp_pgd_mutex);
352 }
353
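/*
 * Install HYP ptes for [start, end), mapping consecutive pfns starting at
 * @pfn with protection @prot. The pte tables must already be allocated.
 */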
354 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
355                                     unsigned long end, unsigned long pfn,
356                                     pgprot_t prot)
357 {
358         pte_t *pte;
359         unsigned long addr;
360
361         addr = start;
362         do {
363                 pte = pte_offset_kernel(pmd, addr);
364                 kvm_set_pte(pte, pfn_pte(pfn, prot));
365                 get_page(virt_to_page(pte));
366                 kvm_flush_dcache_to_poc(pte, sizeof(*pte));
367                 pfn++;
368         } while (addr += PAGE_SIZE, addr != end);
369 }
370
371 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
372                                    unsigned long end, unsigned long pfn,
373                                    pgprot_t prot)
374 {
375         pmd_t *pmd;
376         pte_t *pte;
377         unsigned long addr, next;
378
379         addr = start;
380         do {
381                 pmd = pmd_offset(pud, addr);
382
383                 BUG_ON(pmd_sect(*pmd));
384
385                 if (pmd_none(*pmd)) {
386                         pte = pte_alloc_one_kernel(NULL, addr);
387                         if (!pte) {
388                                 kvm_err("Cannot allocate Hyp pte\n");
389                                 return -ENOMEM;
390                         }
391                         pmd_populate_kernel(NULL, pmd, pte);
392                         get_page(virt_to_page(pmd));
393                         kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
394                 }
395
396                 next = pmd_addr_end(addr, end);
397
398                 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
399                 pfn += (next - addr) >> PAGE_SHIFT;
400         } while (addr = next, addr != end);
401
402         return 0;
403 }
404
405 static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
406                                    unsigned long end, unsigned long pfn,
407                                    pgprot_t prot)
408 {
409         pud_t *pud;
410         pmd_t *pmd;
411         unsigned long addr, next;
412         int ret;
413
414         addr = start;
415         do {
416                 pud = pud_offset(pgd, addr);
417
418                 if (pud_none_or_clear_bad(pud)) {
419                         pmd = pmd_alloc_one(NULL, addr);
420                         if (!pmd) {
421                                 kvm_err("Cannot allocate Hyp pmd\n");
422                                 return -ENOMEM;
423                         }
424                         pud_populate(NULL, pud, pmd);
425                         get_page(virt_to_page(pud));
426                         kvm_flush_dcache_to_poc(pud, sizeof(*pud));
427                 }
428
429                 next = pud_addr_end(addr, end);
430                 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
431                 if (ret)
432                         return ret;
433                 pfn += (next - addr) >> PAGE_SHIFT;
434         } while (addr = next, addr != end);
435
436         return 0;
437 }
438
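/*
 * Map [start, end) to consecutive pfns starting at @pfn in the page tables
 * rooted at @pgdp, allocating intermediate tables as needed. Serialised by
 * kvm_hyp_pgd_mutex.
 */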
439 static int __create_hyp_mappings(pgd_t *pgdp,
440                                  unsigned long start, unsigned long end,
441                                  unsigned long pfn, pgprot_t prot)
442 {
443         pgd_t *pgd;
444         pud_t *pud;
445         unsigned long addr, next;
446         int err = 0;
447
448         mutex_lock(&kvm_hyp_pgd_mutex);
449         addr = start & PAGE_MASK;
450         end = PAGE_ALIGN(end);
451         do {
452                 pgd = pgdp + pgd_index(addr);
453
454                 if (pgd_none(*pgd)) {
455                         pud = pud_alloc_one(NULL, addr);
456                         if (!pud) {
457                                 kvm_err("Cannot allocate Hyp pud\n");
458                                 err = -ENOMEM;
459                                 goto out;
460                         }
461                         pgd_populate(NULL, pgd, pud);
462                         get_page(virt_to_page(pgd));
463                         kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
464                 }
465
466                 next = pgd_addr_end(addr, end);
467                 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
468                 if (err)
469                         goto out;
470                 pfn += (next - addr) >> PAGE_SHIFT;
471         } while (addr = next, addr != end);
472 out:
473         mutex_unlock(&kvm_hyp_pgd_mutex);
474         return err;
475 }
476
477 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
478 {
479         if (!is_vmalloc_addr(kaddr)) {
480                 BUG_ON(!virt_addr_valid(kaddr));
481                 return __pa(kaddr);
482         } else {
483                 return page_to_phys(vmalloc_to_page(kaddr)) +
484                        offset_in_page(kaddr);
485         }
486 }
487
488 /**
489  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
490  * @from:       The virtual kernel start address of the range
491  * @to:         The virtual kernel end address of the range (exclusive)
492  *
493  * The same virtual address as the kernel virtual address is also used in
494  * Hyp mode (modulo HYP_PAGE_OFFSET), mapping to the same underlying
495  * physical pages.
496  */
497 int create_hyp_mappings(void *from, void *to)
498 {
499         phys_addr_t phys_addr;
500         unsigned long virt_addr;
501         unsigned long start = KERN_TO_HYP((unsigned long)from);
502         unsigned long end = KERN_TO_HYP((unsigned long)to);
503
504         start = start & PAGE_MASK;
505         end = PAGE_ALIGN(end);
506
507         for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
508                 int err;
509
510                 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
511                 err = __create_hyp_mappings(hyp_pgd, virt_addr,
512                                             virt_addr + PAGE_SIZE,
513                                             __phys_to_pfn(phys_addr),
514                                             PAGE_HYP);
515                 if (err)
516                         return err;
517         }
518
519         return 0;
520 }
521
522 /**
523  * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
524  * @from:       The kernel start VA of the range
525  * @to:         The kernel end VA of the range (exclusive)
526  * @phys_addr:  The physical start address which gets mapped
527  *
528  * The resulting HYP VA is the same as the kernel VA, modulo
529  * HYP_PAGE_OFFSET.
530  */
531 int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
532 {
533         unsigned long start = KERN_TO_HYP((unsigned long)from);
534         unsigned long end = KERN_TO_HYP((unsigned long)to);
535
536         /* Check for a valid kernel IO mapping */
537         if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
538                 return -EINVAL;
539
540         return __create_hyp_mappings(hyp_pgd, start, end,
541                                      __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
542 }
543
544 /**
545  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
546  * @kvm:        The KVM struct pointer for the VM.
547  *
548  * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
549  * support either full 40-bit input addresses or limited to 32-bit input
550  * addresses). Clears the allocated pages.
551  *
552  * Note we don't need locking here as this is only called when the VM is
553  * created, which can only be done once.
554  */
555 int kvm_alloc_stage2_pgd(struct kvm *kvm)
556 {
557         int ret;
558         pgd_t *pgd;
559
560         if (kvm->arch.pgd != NULL) {
561                 kvm_err("kvm_arch already initialized?\n");
562                 return -EINVAL;
563         }
564
565         if (KVM_PREALLOC_LEVEL > 0) {
566                 /*
567                  * Allocate fake pgd for the page table manipulation macros to
568                  * work.  This is not used by the hardware and we have no
569                  * alignment requirement for this allocation.
570                  */
571                 pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
572                                        GFP_KERNEL | __GFP_ZERO);
573         } else {
574                 /*
575                  * Allocate actual first-level Stage-2 page table used by the
576                  * hardware for Stage-2 page table walks.
577                  */
578                 pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
579         }
580
581         if (!pgd)
582                 return -ENOMEM;
583
584         ret = kvm_prealloc_hwpgd(kvm, pgd);
585         if (ret)
586                 goto out_err;
587
588         kvm_clean_pgd(pgd);
589         kvm->arch.pgd = pgd;
590         return 0;
591 out_err:
592         if (KVM_PREALLOC_LEVEL > 0)
593                 kfree(pgd);
594         else
595                 free_pages((unsigned long)pgd, S2_PGD_ORDER);
596         return ret;
597 }
598
599 /**
600  * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
601  * @kvm:   The VM pointer
602  * @start: The intermediate physical base address of the range to unmap
603  * @size:  The size of the area to unmap
604  *
605  * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
606  * be called while holding mmu_lock (unless for freeing the stage2 pgd before
607  * destroying the VM), otherwise another faulting VCPU may come in and mess
608  * with things behind our backs.
609  */
610 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
611 {
612         unmap_range(kvm, kvm->arch.pgd, start, size);
613 }
614
615 /**
616  * kvm_free_stage2_pgd - free all stage-2 tables
617  * @kvm:        The KVM struct pointer for the VM.
618  *
619  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
620  * underlying level-2 and level-3 tables before freeing the actual level-1 table
621  * and setting the struct pointer to NULL.
622  *
623  * Note we don't need locking here as this is only called when the VM is
624  * destroyed, which can only be done once.
625  */
626 void kvm_free_stage2_pgd(struct kvm *kvm)
627 {
628         if (kvm->arch.pgd == NULL)
629                 return;
630
631         unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
632         kvm_free_hwpgd(kvm);
633         if (KVM_PREALLOC_LEVEL > 0)
634                 kfree(kvm->arch.pgd);
635         else
636                 free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
637         kvm->arch.pgd = NULL;
638 }
639
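/*
 * Return a pointer to the stage-2 pud entry covering @addr, populating the
 * pgd entry from @cache if it is (unexpectedly) empty.
 */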
640 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
641                              phys_addr_t addr)
642 {
643         pgd_t *pgd;
644         pud_t *pud;
645
646         pgd = kvm->arch.pgd + pgd_index(addr);
647         if (WARN_ON(pgd_none(*pgd))) {
648                 if (!cache)
649                         return NULL;
650                 pud = mmu_memory_cache_alloc(cache);
651                 pgd_populate(NULL, pgd, pud);
652                 get_page(virt_to_page(pgd));
653         }
654
655         return pud_offset(pgd, addr);
656 }
657
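/*
 * Return a pointer to the stage-2 pmd entry covering @addr, allocating any
 * missing intermediate tables from @cache. Returns NULL when a table is
 * missing and no cache was provided.
 */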
658 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
659                              phys_addr_t addr)
660 {
661         pud_t *pud;
662         pmd_t *pmd;
663
664         pud = stage2_get_pud(kvm, cache, addr);
665         if (pud_none(*pud)) {
666                 if (!cache)
667                         return NULL;
668                 pmd = mmu_memory_cache_alloc(cache);
669                 pud_populate(NULL, pud, pmd);
670                 get_page(virt_to_page(pud));
671         }
672
673         return pmd_offset(pud, addr);
674 }
675
676 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
677                                *cache, phys_addr_t addr, const pmd_t *new_pmd)
678 {
679         pmd_t *pmd, old_pmd;
680
681         pmd = stage2_get_pmd(kvm, cache, addr);
682         VM_BUG_ON(!pmd);
683
684         /*
685          * Mapping in huge pages should only happen through a fault.  If a
686          * page is merged into a transparent huge page, the individual
687          * subpages of that huge page should be unmapped through MMU
688          * notifiers before we get here.
689          *
690          * Merging of CompoundPages is not supported; they should instead
691          * be split first, unmapped, merged, and mapped back in on demand.
692          */
693         VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
694
695         old_pmd = *pmd;
696         kvm_set_pmd(pmd, *new_pmd);
697         if (pmd_present(old_pmd))
698                 kvm_tlb_flush_vmid_ipa(kvm, addr);
699         else
700                 get_page(virt_to_page(pmd));
701         return 0;
702 }
703
704 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
705                           phys_addr_t addr, const pte_t *new_pte, bool iomap)
706 {
707         pmd_t *pmd;
708         pte_t *pte, old_pte;
709
710         /* Create stage-2 page table mapping - Levels 0 and 1 */
711         pmd = stage2_get_pmd(kvm, cache, addr);
712         if (!pmd) {
713                 /*
714                  * Ignore calls from kvm_set_spte_hva for unallocated
715                  * address ranges.
716                  */
717                 return 0;
718         }
719
720         /* Create stage-2 page mappings - Level 2 */
721         if (pmd_none(*pmd)) {
722                 if (!cache)
723                         return 0; /* ignore calls from kvm_set_spte_hva */
724                 pte = mmu_memory_cache_alloc(cache);
725                 kvm_clean_pte(pte);
726                 pmd_populate_kernel(NULL, pmd, pte);
727                 get_page(virt_to_page(pmd));
728         }
729
730         pte = pte_offset_kernel(pmd, addr);
731
732         if (iomap && pte_present(*pte))
733                 return -EFAULT;
734
735         /* Create 2nd stage page table mapping - Level 3 */
736         old_pte = *pte;
737         kvm_set_pte(pte, *new_pte);
738         if (pte_present(old_pte))
739                 kvm_tlb_flush_vmid_ipa(kvm, addr);
740         else
741                 get_page(virt_to_page(pte));
742
743         return 0;
744 }
745
746 /**
747  * kvm_phys_addr_ioremap - map a device range to guest IPA
748  *
749  * @kvm:        The KVM pointer
750  * @guest_ipa:  The IPA at which to insert the mapping
751  * @pa:         The physical address of the device
752  * @size:       The size of the mapping
 * @writable:   Whether or not to create a writable mapping
753  */
754 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
755                           phys_addr_t pa, unsigned long size, bool writable)
756 {
757         phys_addr_t addr, end;
758         int ret = 0;
759         unsigned long pfn;
760         struct kvm_mmu_memory_cache cache = { 0, };
761
762         end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
763         pfn = __phys_to_pfn(pa);
764
765         for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
766                 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
767
768                 if (writable)
769                         kvm_set_s2pte_writable(&pte);
770
771                 ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
772                                                 KVM_NR_MEM_OBJS);
773                 if (ret)
774                         goto out;
775                 spin_lock(&kvm->mmu_lock);
776                 ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
777                 spin_unlock(&kvm->mmu_lock);
778                 if (ret)
779                         goto out;
780
781                 pfn++;
782         }
783
784 out:
785         mmu_free_memory_cache(&cache);
786         return ret;
787 }
788
789 static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
790 {
791         pfn_t pfn = *pfnp;
792         gfn_t gfn = *ipap >> PAGE_SHIFT;
793
794         if (PageTransCompound(pfn_to_page(pfn))) {
795                 unsigned long mask;
796                 /*
797                  * The address we faulted on is backed by a transparent huge
798                  * page.  However, because we map the compound huge page and
799                  * not the individual tail page, we need to transfer the
800                  * refcount to the head page.  We have to be careful that the
801                  * THP doesn't start to split while we are adjusting the
802                  * refcounts.
803                  *
804                  * We are sure this doesn't happen, because mmu_notifier_retry
805                  * was successful and we are holding the mmu_lock, so if this
806                  * THP is trying to split, it will be blocked in the mmu
807                  * notifier before touching any of the pages, specifically
808                  * before being able to call __split_huge_page_refcount().
809                  *
810                  * We can therefore safely transfer the refcount from PG_tail
811                  * to PG_head and switch the pfn from a tail page to the head
812                  * page accordingly.
813                  */
814                 mask = PTRS_PER_PMD - 1;
815                 VM_BUG_ON((gfn & mask) != (pfn & mask));
816                 if (pfn & mask) {
817                         *ipap &= PMD_MASK;
818                         kvm_release_pfn_clean(pfn);
819                         pfn &= ~mask;
820                         kvm_get_pfn(pfn);
821                         *pfnp = pfn;
822                 }
823
824                 return true;
825         }
826
827         return false;
828 }
829
830 static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
831 {
832         if (kvm_vcpu_trap_is_iabt(vcpu))
833                 return false;
834
835         return kvm_vcpu_dabt_iswrite(vcpu);
836 }
837
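/*
 * Handle a stage-2 fault on an address backed by a memslot: resolve the
 * faulting gfn to a host page, use a PMD-sized block mapping when the page
 * is backed by hugetlbfs or a suitably aligned THP, and install the
 * resulting mapping in the stage-2 page tables.
 */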
838 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
839                           struct kvm_memory_slot *memslot, unsigned long hva,
840                           unsigned long fault_status)
841 {
842         int ret;
843         bool write_fault, writable, hugetlb = false, force_pte = false;
844         unsigned long mmu_seq;
845         gfn_t gfn = fault_ipa >> PAGE_SHIFT;
846         struct kvm *kvm = vcpu->kvm;
847         struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
848         struct vm_area_struct *vma;
849         pfn_t pfn;
850         pgprot_t mem_type = PAGE_S2;
851
852         write_fault = kvm_is_write_fault(vcpu);
853         if (fault_status == FSC_PERM && !write_fault) {
854                 kvm_err("Unexpected L2 read permission error\n");
855                 return -EFAULT;
856         }
857
858         /* Let's check if we will get back a huge page backed by hugetlbfs */
859         down_read(&current->mm->mmap_sem);
860         vma = find_vma_intersection(current->mm, hva, hva + 1);
861         if (unlikely(!vma)) {
862                 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
863                 up_read(&current->mm->mmap_sem);
864                 return -EFAULT;
865         }
866
867         if (is_vm_hugetlb_page(vma)) {
868                 hugetlb = true;
869                 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
870         } else {
871                 /*
872                  * Pages belonging to memslots that don't have the same
873                  * alignment for userspace and IPA cannot be mapped using
874                  * block descriptors even if the pages belong to a THP for
875                  * the process, because the stage-2 block descriptor will
876          * cover more than a single THP and we lose atomicity for
877                  * unmapping, updates, and splits of the THP or other pages
878                  * in the stage-2 block range.
879                  */
880                 if ((memslot->userspace_addr & ~PMD_MASK) !=
881                     ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
882                         force_pte = true;
883         }
884         up_read(&current->mm->mmap_sem);
885
886         /* We need minimum second+third level pages */
887         ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
888                                      KVM_NR_MEM_OBJS);
889         if (ret)
890                 return ret;
891
892         mmu_seq = vcpu->kvm->mmu_notifier_seq;
893         /*
894          * Ensure the read of mmu_notifier_seq happens before we call
895          * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
896          * the page we just got a reference to gets unmapped before we have a
897          * chance to grab the mmu_lock, which ensures that if the page gets
898          * unmapped afterwards, the call to kvm_unmap_hva will take it away
899          * from us again properly. This smp_rmb() interacts with the smp_wmb()
900          * in kvm_mmu_notifier_invalidate_<page|range_end>.
901          */
902         smp_rmb();
903
904         pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
905         if (is_error_pfn(pfn))
906                 return -EFAULT;
907
908         if (kvm_is_mmio_pfn(pfn))
909                 mem_type = PAGE_S2_DEVICE;
910
911         spin_lock(&kvm->mmu_lock);
912         if (mmu_notifier_retry(kvm, mmu_seq))
913                 goto out_unlock;
914         if (!hugetlb && !force_pte)
915                 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
916
917         if (hugetlb) {
918                 pmd_t new_pmd = pfn_pmd(pfn, mem_type);
919                 new_pmd = pmd_mkhuge(new_pmd);
920                 if (writable) {
921                         kvm_set_s2pmd_writable(&new_pmd);
922                         kvm_set_pfn_dirty(pfn);
923                 }
924                 coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
925                 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
926         } else {
927                 pte_t new_pte = pfn_pte(pfn, mem_type);
928                 if (writable) {
929                         kvm_set_s2pte_writable(&new_pte);
930                         kvm_set_pfn_dirty(pfn);
931                 }
932                 coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
933                 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
934                         pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
935         }
936
937
938 out_unlock:
939         spin_unlock(&kvm->mmu_lock);
940         kvm_release_pfn_clean(pfn);
941         return ret;
942 }
943
944 /**
945  * kvm_handle_guest_abort - handles all 2nd stage aborts
946  * @vcpu:       the VCPU pointer
947  * @run:        the kvm_run structure
948  *
949  * Any abort that gets to the host is almost guaranteed to be caused by a
950  * missing second stage translation table entry, which can mean that either the
951  * missing second stage translation table entry, which can mean that either the
952  * guest simply needs more memory and we must allocate an appropriate page, or
953  * that the guest tried to access I/O memory, which is emulated by user
954  * memory region has been registered as standard RAM by user space.
955  */
956 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
957 {
958         unsigned long fault_status;
959         phys_addr_t fault_ipa;
960         struct kvm_memory_slot *memslot;
961         unsigned long hva;
962         bool is_iabt, write_fault, writable;
963         gfn_t gfn;
964         int ret, idx;
965
966         is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
967         fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
968
969         trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
970                               kvm_vcpu_get_hfar(vcpu), fault_ipa);
971
972         /* Check that the stage-2 fault is a translation or permission fault */
973         fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
974         if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
975                 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
976                         kvm_vcpu_trap_get_class(vcpu),
977                         (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
978                         (unsigned long)kvm_vcpu_get_hsr(vcpu));
979                 return -EFAULT;
980         }
981
982         idx = srcu_read_lock(&vcpu->kvm->srcu);
983
984         gfn = fault_ipa >> PAGE_SHIFT;
985         memslot = gfn_to_memslot(vcpu->kvm, gfn);
986         hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
987         write_fault = kvm_is_write_fault(vcpu);
988         if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
989                 if (is_iabt) {
990                         /* Prefetch Abort on I/O address */
991                         kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
992                         ret = 1;
993                         goto out_unlock;
994                 }
995
996                 /*
997                  * The IPA is reported as [MAX:12], so we need to
998                  * complement it with the bottom 12 bits from the
999                  * faulting VA. This is always 12 bits, irrespective
1000                  * of the page size.
1001                  */
1002                 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1003                 ret = io_mem_abort(vcpu, run, fault_ipa);
1004                 goto out_unlock;
1005         }
1006
1007         /* Userspace should not be able to register out-of-bounds IPAs */
1008         VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
1009
1010         ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1011         if (ret == 0)
1012                 ret = 1;
1013 out_unlock:
1014         srcu_read_unlock(&vcpu->kvm->srcu, idx);
1015         return ret;
1016 }
1017
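/*
 * Apply @handler to every guest physical page whose backing host virtual
 * range intersects [start, end), iterating over all memslots.
 */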
1018 static void handle_hva_to_gpa(struct kvm *kvm,
1019                               unsigned long start,
1020                               unsigned long end,
1021                               void (*handler)(struct kvm *kvm,
1022                                               gpa_t gpa, void *data),
1023                               void *data)
1024 {
1025         struct kvm_memslots *slots;
1026         struct kvm_memory_slot *memslot;
1027
1028         slots = kvm_memslots(kvm);
1029
1030         /* we only care about the pages that the guest sees */
1031         kvm_for_each_memslot(memslot, slots) {
1032                 unsigned long hva_start, hva_end;
1033                 gfn_t gfn, gfn_end;
1034
1035                 hva_start = max(start, memslot->userspace_addr);
1036                 hva_end = min(end, memslot->userspace_addr +
1037                                         (memslot->npages << PAGE_SHIFT));
1038                 if (hva_start >= hva_end)
1039                         continue;
1040
1041                 /*
1042                  * {gfn(page) | page intersects with [hva_start, hva_end)} =
1043                  * {gfn_start, gfn_start+1, ..., gfn_end-1}.
1044                  */
1045                 gfn = hva_to_gfn_memslot(hva_start, memslot);
1046                 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1047
1048                 for (; gfn < gfn_end; ++gfn) {
1049                         gpa_t gpa = gfn << PAGE_SHIFT;
1050                         handler(kvm, gpa, data);
1051                 }
1052         }
1053 }
1054
1055 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
1056 {
1057         unmap_stage2_range(kvm, gpa, PAGE_SIZE);
1058 }
1059
1060 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1061 {
1062         unsigned long end = hva + PAGE_SIZE;
1063
1064         if (!kvm->arch.pgd)
1065                 return 0;
1066
1067         trace_kvm_unmap_hva(hva);
1068         handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
1069         return 0;
1070 }
1071
1072 int kvm_unmap_hva_range(struct kvm *kvm,
1073                         unsigned long start, unsigned long end)
1074 {
1075         if (!kvm->arch.pgd)
1076                 return 0;
1077
1078         trace_kvm_unmap_hva_range(start, end);
1079         handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
1080         return 0;
1081 }
1082
1083 static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
1084 {
1085         pte_t *pte = (pte_t *)data;
1086
1087         stage2_set_pte(kvm, NULL, gpa, pte, false);
1088 }
1089
1090
1091 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1092 {
1093         unsigned long end = hva + PAGE_SIZE;
1094         pte_t stage2_pte;
1095
1096         if (!kvm->arch.pgd)
1097                 return;
1098
1099         trace_kvm_set_spte_hva(hva);
1100         stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
1101         handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
1102 }
1103
1104 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
1105 {
1106         mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1107 }
1108
1109 phys_addr_t kvm_mmu_get_httbr(void)
1110 {
1111         return virt_to_phys(hyp_pgd);
1112 }
1113
1114 phys_addr_t kvm_mmu_get_boot_httbr(void)
1115 {
1116         return virt_to_phys(boot_hyp_pgd);
1117 }
1118
1119 phys_addr_t kvm_get_idmap_vector(void)
1120 {
1121         return hyp_idmap_vector;
1122 }
1123
1124 int kvm_mmu_init(void)
1125 {
1126         int err;
1127
1128         hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
1129         hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
1130         hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
1131
1132         if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
1133                 /*
1134                  * Our init code is crossing a page boundary. Allocate
1135                  * a bounce page, copy the code over and use that.
1136                  */
1137                 size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
1138                 phys_addr_t phys_base;
1139
1140                 init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
1141                 if (!init_bounce_page) {
1142                         kvm_err("Couldn't allocate HYP init bounce page\n");
1143                         err = -ENOMEM;
1144                         goto out;
1145                 }
1146
1147                 memcpy(init_bounce_page, __hyp_idmap_text_start, len);
1148                 /*
1149                  * Warning: the code we just copied to the bounce page
1150                  * must be flushed to the point of coherency.
1151                  * Otherwise, the data may be sitting in L2, and HYP
1152                  * mode won't be able to observe it as it runs with
1153                  * caches off at that point.
1154                  */
1155                 kvm_flush_dcache_to_poc(init_bounce_page, len);
1156
1157                 phys_base = kvm_virt_to_phys(init_bounce_page);
1158                 hyp_idmap_vector += phys_base - hyp_idmap_start;
1159                 hyp_idmap_start = phys_base;
1160                 hyp_idmap_end = phys_base + len;
1161
1162                 kvm_info("Using HYP init bounce page @%lx\n",
1163                          (unsigned long)phys_base);
1164         }
1165
1166         hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
1167         boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
1168
1169         if (!hyp_pgd || !boot_hyp_pgd) {
1170                 kvm_err("Hyp mode PGD not allocated\n");
1171                 err = -ENOMEM;
1172                 goto out;
1173         }
1174
1175         /* Create the idmap in the boot page tables */
1176         err =   __create_hyp_mappings(boot_hyp_pgd,
1177                                       hyp_idmap_start, hyp_idmap_end,
1178                                       __phys_to_pfn(hyp_idmap_start),
1179                                       PAGE_HYP);
1180
1181         if (err) {
1182                 kvm_err("Failed to idmap %lx-%lx\n",
1183                         hyp_idmap_start, hyp_idmap_end);
1184                 goto out;
1185         }
1186
1187         /* Map the very same page at the trampoline VA */
1188         err =   __create_hyp_mappings(boot_hyp_pgd,
1189                                       TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
1190                                       __phys_to_pfn(hyp_idmap_start),
1191                                       PAGE_HYP);
1192         if (err) {
1193                 kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
1194                         TRAMPOLINE_VA);
1195                 goto out;
1196         }
1197
1198         /* Map the same page again into the runtime page tables */
1199         err =   __create_hyp_mappings(hyp_pgd,
1200                                       TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
1201                                       __phys_to_pfn(hyp_idmap_start),
1202                                       PAGE_HYP);
1203         if (err) {
1204                 kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
1205                         TRAMPOLINE_VA);
1206                 goto out;
1207         }
1208
1209         return 0;
1210 out:
1211         free_hyp_pgds();
1212         return err;
1213 }
1214
1215 void kvm_arch_commit_memory_region(struct kvm *kvm,
1216                                    struct kvm_userspace_memory_region *mem,
1217                                    const struct kvm_memory_slot *old,
1218                                    enum kvm_mr_change change)
1219 {
1220 }
1221
1222 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1223                                    struct kvm_memory_slot *memslot,
1224                                    struct kvm_userspace_memory_region *mem,
1225                                    enum kvm_mr_change change)
1226 {
1227         hva_t hva = mem->userspace_addr;
1228         hva_t reg_end = hva + mem->memory_size;
1229         bool writable = !(mem->flags & KVM_MEM_READONLY);
1230         int ret = 0;
1231
1232         if (change != KVM_MR_CREATE && change != KVM_MR_MOVE)
1233                 return 0;
1234
1235         /*
1236          * Prevent userspace from creating a memory region outside of the IPA
1237          * space addressable by the guest.
1238          */
1239         if (memslot->base_gfn + memslot->npages >=
1240             (KVM_PHYS_SIZE >> PAGE_SHIFT))
1241                 return -EFAULT;
1242
1243         /*
1244          * A memory region could potentially cover multiple VMAs, and any holes
1245          * between them, so iterate over all of them to find out if we can map
1246          * any of them right now.
1247          *
1248          *     +--------------------------------------------+
1249          * +---------------+----------------+   +----------------+
1250          * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
1251          * +---------------+----------------+   +----------------+
1252          *     |               memory region                |
1253          *     +--------------------------------------------+
1254          */
1255         do {
1256                 struct vm_area_struct *vma = find_vma(current->mm, hva);
1257                 hva_t vm_start, vm_end;
1258
1259                 if (!vma || vma->vm_start >= reg_end)
1260                         break;
1261
1262                 /*
1263                  * Mapping a read-only VMA is only allowed if the
1264                  * memory region is configured as read-only.
1265                  */
1266                 if (writable && !(vma->vm_flags & VM_WRITE)) {
1267                         ret = -EPERM;
1268                         break;
1269                 }
1270
1271                 /*
1272                  * Take the intersection of this VMA with the memory region
1273                  */
1274                 vm_start = max(hva, vma->vm_start);
1275                 vm_end = min(reg_end, vma->vm_end);
1276
1277                 if (vma->vm_flags & VM_PFNMAP) {
1278                         gpa_t gpa = mem->guest_phys_addr +
1279                                     (vm_start - mem->userspace_addr);
1280                         phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
1281                                          vm_start - vma->vm_start;
1282
1283                         ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1284                                                     vm_end - vm_start,
1285                                                     writable);
1286                         if (ret)
1287                                 break;
1288                 }
1289                 hva = vm_end;
1290         } while (hva < reg_end);
1291
1292         if (ret) {
1293                 spin_lock(&kvm->mmu_lock);
1294                 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
1295                 spin_unlock(&kvm->mmu_lock);
1296         }
1297         return ret;
1298 }
1299
1300 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1301                            struct kvm_memory_slot *dont)
1302 {
1303 }
1304
1305 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1306                             unsigned long npages)
1307 {
1308         return 0;
1309 }
1310
1311 void kvm_arch_memslots_updated(struct kvm *kvm)
1312 {
1313 }
1314
1315 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1316 {
1317 }
1318
1319 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1320                                    struct kvm_memory_slot *slot)
1321 {
1322         gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
1323         phys_addr_t size = slot->npages << PAGE_SHIFT;
1324
1325         spin_lock(&kvm->mmu_lock);
1326         unmap_stage2_range(kvm, gpa, size);
1327         spin_unlock(&kvm->mmu_lock);
1328 }