author     Dave Airlie <airlied@redhat.com>   2022-11-24 11:05:43 +1000
committer  Dave Airlie <airlied@redhat.com>   2022-11-24 11:05:43 +1000
commit     d47f9580839eb6fe568e38b2084d94887fbf5ce0
tree       2493555057f8e5c850590d7438fdbf4472666201 /virt
parent     3d335a523b938a445a674be24d1dd5c7a4c86fb6
parent     eb7081409f94a9a8608593d0fb63a1aa3d6f95d8
Backmerge tag 'v6.1-rc6' into drm-next

Linux 6.1-rc6

This is needed for drm-misc-next and tegra.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c   16
-rw-r--r--  virt/kvm/pfncache.c   62
2 files changed, 55 insertions(+), 23 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1376a47fedee..25d7872b29c1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4585,6 +4585,9 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
}
case KVM_CAP_DIRTY_LOG_RING:
case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
+ if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
+ return -EINVAL;
+
return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
default:
return kvm_vm_ioctl_enable_cap(kvm, cap);
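The hunk above closes a gap where userspace could enable KVM_CAP_DIRTY_LOG_RING on a VM whose kernel or architecture does not actually support it; enabling now fails with -EINVAL unless the generic extension check passes. A minimal userspace sketch of the probe-then-enable flow this enforces (KVM_CHECK_EXTENSION, KVM_ENABLE_CAP and struct kvm_enable_cap are the real KVM UAPI; the helper name and error handling are illustrative):

/* Probe for the dirty ring before enabling it; with this fix, skipping
 * the probe on an unsupported kernel fails cleanly with -EINVAL. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int enable_dirty_ring(int vm_fd, __u32 ring_bytes)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_DIRTY_LOG_RING,
		.args = { ring_bytes },	/* ring size: power-of-two byte count */
	};

	/* Returns the maximum supported ring size, or 0 if unsupported. */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING) <= 0)
		return -1;

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}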
@@ -5409,6 +5412,7 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
int (*get)(void *, u64 *), int (*set)(void *, u64),
const char *fmt)
{
+ int ret;
struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
inode->i_private;
@@ -5420,15 +5424,13 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
if (!kvm_get_kvm_safe(stat_data->kvm))
return -ENOENT;
- if (simple_attr_open(inode, file, get,
- kvm_stats_debugfs_mode(stat_data->desc) & 0222
- ? set : NULL,
- fmt)) {
+ ret = simple_attr_open(inode, file, get,
+ kvm_stats_debugfs_mode(stat_data->desc) & 0222
+ ? set : NULL, fmt);
+ if (ret)
kvm_put_kvm(stat_data->kvm);
- return -ENOMEM;
- }
- return 0;
+ return ret;
}
static int kvm_debugfs_release(struct inode *inode, struct file *file)
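The debugfs hunk fixes two problems at once: simple_attr_open()'s real return code is now propagated instead of a hardcoded -ENOMEM, and the kvm reference taken by kvm_get_kvm_safe() is dropped on any failure, not leaked. A self-contained sketch of the general pattern, with every name illustrative rather than kernel API:

/* Take a reference before the open, release it on any failure, and
 * propagate the callee's actual error instead of guessing -ENOMEM. */
#include <errno.h>

struct obj { int refs; };

static int get_ref(struct obj *o)  { return o->refs > 0 ? ++o->refs, 1 : 0; }
static void put_ref(struct obj *o) { --o->refs; }

static int open_with_ref(struct obj *o, int (*do_open)(struct obj *))
{
	int ret;

	if (!get_ref(o))		/* object already dying */
		return -ENOENT;

	ret = do_open(o);		/* may fail for reasons other than ENOMEM */
	if (ret)
		put_ref(o);		/* undo the reference on failure */

	return ret;			/* 0 on success, the real errno otherwise */
}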
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 68ff41d39545..346e47f15572 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -81,6 +81,9 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
{
struct kvm_memslots *slots = kvm_memslots(kvm);
+ if (!gpc->active)
+ return false;
+
if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
return false;
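With the new gpc->active test, a deactivated cache now fails the fast-path check even if its cached mapping still looks valid, pushing callers back into refresh, where the matching test in the next hunk makes the refresh fail too. For context, a sketch of the consumer pattern, roughly as existing callers such as the x86 Xen code use it, assuming a cache sized to a full page:

unsigned long flags;

read_lock_irqsave(&gpc->lock, flags);
while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) {
	read_unlock_irqrestore(&gpc->lock, flags);

	/* Fails with -EINVAL if the cache was deactivated meanwhile. */
	if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE))
		return;

	read_lock_irqsave(&gpc->lock, flags);
}
/* ... access the mapping through gpc->khva ... */
read_unlock_irqrestore(&gpc->lock, flags);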
@@ -240,10 +243,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
{
struct kvm_memslots *slots = kvm_memslots(kvm);
unsigned long page_offset = gpa & ~PAGE_MASK;
- kvm_pfn_t old_pfn, new_pfn;
+ bool unmap_old = false;
unsigned long old_uhva;
+ kvm_pfn_t old_pfn;
void *old_khva;
- int ret = 0;
+ int ret;
/*
* It must fit within a single page. The 'len' argument is
@@ -261,6 +265,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
write_lock_irq(&gpc->lock);
+ if (!gpc->active) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
old_pfn = gpc->pfn;
old_khva = gpc->khva - offset_in_page(gpc->khva);
old_uhva = gpc->uhva;
@@ -291,6 +300,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
/* If the HVA→PFN mapping was already valid, don't unmap it. */
old_pfn = KVM_PFN_ERR_FAULT;
old_khva = NULL;
+ ret = 0;
}
out:
@@ -305,14 +315,15 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
gpc->khva = NULL;
}
- /* Snapshot the new pfn before dropping the lock! */
- new_pfn = gpc->pfn;
+ /* Detect a pfn change before dropping the lock! */
+ unmap_old = (old_pfn != gpc->pfn);
+out_unlock:
write_unlock_irq(&gpc->lock);
mutex_unlock(&gpc->refresh_lock);
- if (old_pfn != new_pfn)
+ if (unmap_old)
gpc_unmap_khva(kvm, old_pfn, old_khva);
return ret;
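Replacing the new_pfn snapshot with an unmap_old flag matters because of the early-exit path added above: a refresh against a deactivated cache jumps to out_unlock before gpc->pfn is ever revalidated, so a snapshot taken there would be stale. The flag defaults to false and is set only on the path that reaches the comparison. A condensed sketch of the resulting control flow:

write_lock_irq(&gpc->lock);
if (!gpc->active) {
	ret = -EINVAL;
	goto out_unlock;		/* unmap_old stays false */
}
/* ... revalidate, possibly installing a new pfn/khva ... */
unmap_old = (old_pfn != gpc->pfn);	/* decided while still holding the lock */
out_unlock:
write_unlock_irq(&gpc->lock);
mutex_unlock(&gpc->refresh_lock);

if (unmap_old)				/* old mapping torn down outside the lock */
	gpc_unmap_khva(kvm, old_pfn, old_khva);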
@@ -346,42 +357,61 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc)
+{
+ rwlock_init(&gpc->lock);
+ mutex_init(&gpc->refresh_lock);
+}
+EXPORT_SYMBOL_GPL(kvm_gpc_init);
-int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
- gpa_t gpa, unsigned long len)
+int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+ gpa_t gpa, unsigned long len)
{
WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
if (!gpc->active) {
- rwlock_init(&gpc->lock);
- mutex_init(&gpc->refresh_lock);
-
gpc->khva = NULL;
gpc->pfn = KVM_PFN_ERR_FAULT;
gpc->uhva = KVM_HVA_ERR_BAD;
gpc->vcpu = vcpu;
gpc->usage = usage;
gpc->valid = false;
- gpc->active = true;
spin_lock(&kvm->gpc_lock);
list_add(&gpc->list, &kvm->gpc_list);
spin_unlock(&kvm->gpc_lock);
+
+ /*
+ * Activate the cache after adding it to the list, a concurrent
+ * refresh must not establish a mapping until the cache is
+ * reachable by mmu_notifier events.
+ */
+ write_lock_irq(&gpc->lock);
+ gpc->active = true;
+ write_unlock_irq(&gpc->lock);
}
return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
}
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init);
+EXPORT_SYMBOL_GPL(kvm_gpc_activate);
-void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
if (gpc->active) {
+ /*
+ * Deactivate the cache before removing it from the list, KVM
+ * must stall mmu_notifier events until all users go away, i.e.
+ * until gpc->lock is dropped and refresh is guaranteed to fail.
+ */
+ write_lock_irq(&gpc->lock);
+ gpc->active = false;
+ write_unlock_irq(&gpc->lock);
+
spin_lock(&kvm->gpc_lock);
list_del(&gpc->list);
spin_unlock(&kvm->gpc_lock);
kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
- gpc->active = false;
}
}
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_destroy);
+EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
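The rename is more than cosmetic: kvm_gpc_init() now performs the one-time lock initialization, which was previously redone on every re-activation and could reinitialize a lock a concurrent user held, while activate/deactivate flip gpc->active under gpc->lock so that list membership and visibility to mmu_notifier events stay in sync, per the comments in the hunks above. A lifecycle sketch modeled on how the x86 Xen shared-info caller uses the split API (field names follow the x86 code; error handling elided):

/* One-time init, e.g. at VM creation: just the locks. */
kvm_gpc_init(&kvm->arch.xen.shinfo_cache);

/* Guest supplies an address: add to the list, mark active, map it. */
ret = kvm_gpc_activate(kvm, &kvm->arch.xen.shinfo_cache, NULL,
		       KVM_HOST_USES_PFN, gpa, PAGE_SIZE);

/* Teardown: mark inactive (stalling refresh), delist, then unmap. */
kvm_gpc_deactivate(kvm, &kvm->arch.xen.shinfo_cache);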