author	Linus Torvalds <torvalds@linux-foundation.org>	2018-02-10 13:16:35 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-02-10 13:16:35 -0800
commit	15303ba5d1cd9b28d03a980456c0978c0ea3b208 (patch)
tree	b9200d5b7474661cf36468038529a5269ee83238 /arch/s390/mm
parent	9a61df9e5f7471fe5be3e02bd0bed726b2761a54 (diff)
parent	1ab03c072feb579c9fd116de25be2b211e6bff6a (diff)
Merge tag 'kvm-4.16-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Radim Krčmář:
"ARM:
- icache invalidation optimizations, improving VM startup time
- support for forwarded level-triggered interrupts, improving
performance for timers and passthrough platform devices
- a small fix for power-management notifiers, and some cosmetic
changes
PPC:
- add MMIO emulation for vector loads and stores
- allow HPT guests to run on a radix host on POWER9 v2.2 CPUs without
requiring the complex thread synchronization of older CPU versions
- improve the handling of escalation interrupts with the XIVE
interrupt controller
- support for decrementer register migration
- various cleanups and bugfixes.
s390:
- Cornelia Huck passed maintainership to Janosch Frank
- exitless interrupts for emulated devices
- cleanup of cpuflag handling
- kvm_stat counter improvements
- VSIE improvements
- mm cleanup
x86:
- hypervisor part of SEV
- UMIP, RDPID, and MSR_SMI_COUNT emulation
- paravirtualized TLB shootdown using the new KVM_VCPU_PREEMPTED bit (sketched after this message)
- allow guests to see TOPOEXT, GFNI, VAES, VPCLMULQDQ, and more
AVX512 features
- show vcpu id in its anonymous inode name
- many fixes and cleanups
- per-VCPU MSR bitmaps (already merged through x86/pti branch)
- stable KVM clock when nesting on Hyper-V (merged through
x86/hyperv)"
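
The paravirtualized TLB shootdown item above works by having the guest skip the flush IPI to a vCPU that the host has preempted: the guest instead sets a flush-request flag in the shared steal-time area, and the host flushes the TLB before it reschedules that vCPU. Below is a minimal userspace sketch of that handshake using C11 atomics; the two flag names mirror KVM's KVM_VCPU_PREEMPTED and KVM_VCPU_FLUSH_TLB bits, but every type and function here is an illustrative stand-in, not the kernel code.

#include <stdatomic.h>
#include <stdio.h>

/* Flag layout mirrors the guest/host shared "preempted" byte in
 * struct kvm_steal_time; the values here are illustrative. */
#define VCPU_PREEMPTED  (1 << 0)  /* host: vCPU is descheduled */
#define VCPU_FLUSH_TLB  (1 << 1)  /* guest: flush before running this vCPU again */

struct vcpu_sim {
	_Atomic unsigned char preempted;  /* stand-in for steal_time.preempted */
};

/* Guest side: skip the flush IPI for a preempted vCPU and queue the
 * flush for the host to perform on re-entry instead. */
static int queue_remote_flush(struct vcpu_sim *v)
{
	unsigned char state = atomic_load(&v->preempted);

	while (state & VCPU_PREEMPTED) {
		if (atomic_compare_exchange_weak(&v->preempted, &state,
						 state | VCPU_FLUSH_TLB))
			return 1;  /* queued, no IPI needed */
	}
	return 0;  /* vCPU is running, fall back to a real shootdown */
}

/* Host side: on vCPU re-entry, clear the byte and honour a queued flush. */
static void host_vcpu_enter(struct vcpu_sim *v)
{
	unsigned char state = atomic_exchange(&v->preempted, 0);

	if (state & VCPU_FLUSH_TLB)
		printf("host: flushing guest TLB before re-entry\n");
}

int main(void)
{
	struct vcpu_sim v = { .preempted = VCPU_PREEMPTED };

	if (queue_remote_flush(&v))
		printf("guest: flush queued, IPI avoided\n");
	host_vcpu_enter(&v);
	return 0;
}

The compare-and-swap loop is the essential part: it closes the race where the host clears the preempted byte while the guest is deciding whether an IPI is needed.
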
* tag 'kvm-4.16-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (197 commits)
KVM: PPC: Book3S: Add MMIO emulation for VMX instructions
KVM: PPC: Book3S HV: Branch inside feature section
KVM: PPC: Book3S HV: Make HPT resizing work on POWER9
KVM: PPC: Book3S HV: Fix handling of secondary HPTEG in HPT resizing code
KVM: PPC: Book3S PR: Fix broken select due to misspelling
KVM: x86: don't forget vcpu_put() in kvm_arch_vcpu_ioctl_set_sregs()
KVM: PPC: Book3S PR: Fix svcpu copying with preemption enabled
KVM: PPC: Book3S HV: Drop locks before reading guest memory
kvm: x86: remove efer_reload entry in kvm_vcpu_stat
KVM: x86: AMD Processor Topology Information
x86/kvm/vmx: do not use vm-exit instruction length for fast MMIO when running nested
kvm: embed vcpu id to dentry of vcpu anon inode
kvm: Map PFN-type memory regions as writable (if possible)
x86/kvm: Make it compile on 32bit and with HYPYERVISOR_GUEST=n
KVM: arm/arm64: Fixup userspace irqchip static key optimization
KVM: arm/arm64: Fix userspace_irqchip_in_use counting
KVM: arm/arm64: Fix incorrect timer_is_pending logic
MAINTAINERS: update KVM/s390 maintainers
MAINTAINERS: add Halil as additional vfio-ccw maintainer
MAINTAINERS: add David as a reviewer for KVM/s390
...
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/gmap.c	44
1 file changed, 18 insertions(+), 26 deletions(-)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 05d459b638f5..2c55a2b9d6c6 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -815,27 +815,17 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
  * @ptl: pointer to the spinlock pointer
  *
  * Returns a pointer to the locked pte for a guest address, or NULL
- *
- * Note: Can also be called for shadow gmaps.
  */
 static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
 			       spinlock_t **ptl)
 {
 	unsigned long *table;
 
-	if (gmap_is_shadow(gmap))
-		spin_lock(&gmap->guest_table_lock);
+	BUG_ON(gmap_is_shadow(gmap));
 	/* Walk the gmap page table, lock and get pte pointer */
 	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
-	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
-		if (gmap_is_shadow(gmap))
-			spin_unlock(&gmap->guest_table_lock);
+	if (!table || *table & _SEGMENT_ENTRY_INVALID)
 		return NULL;
-	}
-	if (gmap_is_shadow(gmap)) {
-		*ptl = &gmap->guest_table_lock;
-		return pte_offset_map((pmd_t *) table, gaddr);
-	}
 	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
 }
 
@@ -889,8 +879,6 @@ static void gmap_pte_op_end(spinlock_t *ptl)
  * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
  *
  * Called with sg->mm->mmap_sem in read.
- *
- * Note: Can also be called for shadow gmaps.
  */
 static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
 			      unsigned long len, int prot, unsigned long bits)
@@ -900,6 +888,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
 	pte_t *ptep;
 	int rc;
 
+	BUG_ON(gmap_is_shadow(gmap));
 	while (len) {
 		rc = -EAGAIN;
 		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
@@ -960,7 +949,8 @@ EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
  * @val: pointer to the unsigned long value to return
  *
  * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
- * if reading using the virtual address failed.
+ * if reading using the virtual address failed. -EINVAL if called on a gmap
+ * shadow.
  *
  * Called with gmap->mm->mmap_sem in read.
  */
@@ -971,6 +961,9 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
 	pte_t *ptep, pte;
 	int rc;
 
+	if (gmap_is_shadow(gmap))
+		return -EINVAL;
+
 	while (1) {
 		rc = -EAGAIN;
 		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
@@ -1028,18 +1021,17 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
 }
 
 /**
- * gmap_protect_rmap - modify access rights to memory and create an rmap
+ * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
  * @sg: pointer to the shadow guest address space structure
  * @raddr: rmap address in the shadow gmap
  * @paddr: address in the parent guest address space
  * @len: length of the memory area to protect
- * @prot: indicates access rights: none, read-only or read-write
  *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
  */
 static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
-			     unsigned long paddr, unsigned long len, int prot)
+			     unsigned long paddr, unsigned long len)
 {
 	struct gmap *parent;
 	struct gmap_rmap *rmap;
@@ -1067,7 +1059,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
 		if (ptep) {
 			spin_lock(&sg->guest_table_lock);
-			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
+			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
 					     PGSTE_VSIE_BIT);
 			if (!rc)
 				gmap_insert_rmap(sg, vmaddr, rmap);
@@ -1077,7 +1069,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 		radix_tree_preload_end();
 		if (rc) {
 			kfree(rmap);
-			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
+			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
 			if (rc)
 				return rc;
 			continue;
@@ -1616,7 +1608,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
 	origin = r2t & _REGION_ENTRY_ORIGIN;
 	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
 	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
-	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
 	spin_lock(&sg->guest_table_lock);
 	if (!rc) {
 		table = gmap_table_walk(sg, saddr, 4);
@@ -1699,7 +1691,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
 	origin = r3t & _REGION_ENTRY_ORIGIN;
 	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
 	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
-	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
 	spin_lock(&sg->guest_table_lock);
 	if (!rc) {
 		table = gmap_table_walk(sg, saddr, 3);
@@ -1783,7 +1775,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 	origin = sgt & _REGION_ENTRY_ORIGIN;
 	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
 	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
-	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
 	spin_lock(&sg->guest_table_lock);
 	if (!rc) {
 		table = gmap_table_walk(sg, saddr, 2);
@@ -1902,7 +1894,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
 	/* Make pgt read-only in parent gmap page table (not the pgste) */
 	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
 	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
-	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
+	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
 	spin_lock(&sg->guest_table_lock);
 	if (!rc) {
 		table = gmap_table_walk(sg, saddr, 1);
@@ -2005,7 +1997,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_page);
  * Called with sg->parent->shadow_lock.
  */
 static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
-			       unsigned long gaddr, pte_t *pte)
+			       unsigned long gaddr)
 {
 	struct gmap_rmap *rmap, *rnext, *head;
 	unsigned long start, end, bits, raddr;
@@ -2090,7 +2082,7 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
 			spin_lock(&gmap->shadow_lock);
 			list_for_each_entry_safe(sg, next, &gmap->children, list)
-				gmap_shadow_notify(sg, vmaddr, gaddr, pte);
+				gmap_shadow_notify(sg, vmaddr, gaddr);
 			spin_unlock(&gmap->shadow_lock);
 		}
 		if (bits & PGSTE_IN_BIT)
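
Taken together, the gmap.c hunks above make two related simplifications: gmap_pte_op_walk() no longer serves shadow gmaps and now asserts that precondition with BUG_ON(), and gmap_protect_rmap() drops its prot parameter because shadow rmaps always protect the parent mapping read-only. Here is a compact standalone sketch of the same refactoring pattern; all names are invented for illustration and none of this is the kernel code itself.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct gmap_sim {
	bool is_shadow;  /* stand-in for gmap_is_shadow() */
};

/* Before the patch, the walk helper carried a second, conditional
 * locking scheme for shadow gmaps. After it, the precondition is
 * simply asserted, mirroring BUG_ON(gmap_is_shadow(gmap)). */
static int pte_op_walk_sim(struct gmap_sim *g)
{
	assert(!g->is_shadow);
	return 0;  /* pretend the table walk found a valid pte */
}

/* The prot parameter is gone: shadow rmaps always make the parent
 * mapping read-only, so the protection level is hard-wired. */
static int protect_rmap_sim(struct gmap_sim *parent)
{
	const char *level = "read-only";  /* was: caller-supplied prot */

	if (pte_op_walk_sim(parent))
		return -1;
	printf("protected parent range %s\n", level);
	return 0;
}

int main(void)
{
	struct gmap_sim parent = { .is_shadow = false };

	return protect_rmap_sim(&parent);
}

Dropping the dual-mode behaviour removes the conditional guest_table_lock handling from the hot walk path and turns a silent misuse of the helper into an immediate, debuggable failure.
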