author     Marc Zyngier <maz@kernel.org>  2024-09-12 08:36:36 +0100
committer  Marc Zyngier <maz@kernel.org>  2024-09-12 08:36:36 +0100
commit     8884fd12f2807be3f7ba76bee7387d68e61e4a31 (patch)
tree       6d04ba04591e82084995c3b26f891453e3c3af0f
parent     0d56099ed557ce7f76c0ede97272fe58dbf0b7f2 (diff)
parent     f26a525b77e040d584e967369af1e018d2d59112 (diff)
Merge branch kvm-arm64/mmu-misc-6.12 into kvmarm-master/next
* kvm-arm64/mmu-misc-6.12:
: .
: Various minor MMU improvements and bug-fixes:
:
: - Prevent MTE tags being restored by userspace if we are actively
:   logging writes, as that's a recipe for disaster
:
: - Correct the refcount on a page that is not considered for MTE
:   tag copying (such as a device)
:
: - When walking a page table to split blocks, keep the DSB at the end
:   of the walk, as there is no need to perform it on every store.
:
: - Fix boundary check when transferring memory using FFA
: .
KVM: arm64: Add memory length checks and remove inline in do_ffa_mem_xfer
KVM: arm64: Disallow copying MTE to guest memory while KVM is dirty logging
KVM: arm64: Release pfn, i.e. put page, if copying MTE tags hits ZONE_DEVICE
KVM: arm64: Move data barrier to end of split walk
Signed-off-by: Marc Zyngier <maz@kernel.org>
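
As an illustration of the first fix above (refusing tag restore while dirty
logging is active), here is a minimal userspace sketch of the VMM side. It is
not part of this series; it assumes an arm64 host with MTE (KVM_CAP_ARM_MTE
enabled on the VM) and the arm64 UAPI headers, and vm_fd, guest_ipa and
tag_buf are placeholder names:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Restore MTE tags into guest memory, e.g. on the target side of a
     * migration. With this series applied, KVM returns -EBUSY while any
     * memslot has dirty logging enabled, so the VMM must restore tags
     * outside the dirty-tracking phase.
     */
    static long restore_guest_tags(int vm_fd, __u64 guest_ipa,
                                   void *tag_buf, __u64 len)
    {
            struct kvm_arm_copy_mte_tags copy = {
                    .guest_ipa = guest_ipa,
                    .length    = len,      /* bytes of guest memory */
                    .addr      = tag_buf,  /* len / 16 tag bytes, one per granule */
                    .flags     = KVM_ARM_TAGS_TO_GUEST,
            };
            long ret = ioctl(vm_fd, KVM_ARM_MTE_COPY_TAGS, &copy);

            if (ret < 0 && errno == EBUSY)
                    fprintf(stderr, "dirty logging active; restore tags later\n");
            return ret;
    }

On success the ioctl returns the number of bytes copied; reads from the guest
(KVM_ARM_TAGS_FROM_GUEST) remain allowed while logging is active.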
-rw-r--r--   arch/arm64/kvm/guest.c        |  6 ++++++
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/ffa.c | 21 +++++++++++++++------
-rw-r--r--   arch/arm64/kvm/hyp/pgtable.c  |  6 ++++--
3 files changed, 25 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 11098eb7eb44..962f985977c2 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -1045,6 +1045,11 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 
 	mutex_lock(&kvm->slots_lock);
 
+	if (write && atomic_read(&kvm->nr_memslots_dirty_logging)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	while (length > 0) {
 		kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
 		void *maddr;
@@ -1059,6 +1064,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 		page = pfn_to_online_page(pfn);
 		if (!page) {
 			/* Reject ZONE_DEVICE memory */
+			kvm_release_pfn_clean(pfn);
 			ret = -EFAULT;
 			goto out;
 		}
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index e715c157c2c4..e433dfab882a 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -426,9 +426,9 @@ out:
 	return;
 }
 
-static __always_inline void do_ffa_mem_xfer(const u64 func_id,
-					    struct arm_smccc_res *res,
-					    struct kvm_cpu_context *ctxt)
+static void __do_ffa_mem_xfer(const u64 func_id,
+			      struct arm_smccc_res *res,
+			      struct kvm_cpu_context *ctxt)
 {
 	DECLARE_REG(u32, len, ctxt, 1);
 	DECLARE_REG(u32, fraglen, ctxt, 2);
@@ -440,9 +440,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 	u32 offset, nr_ranges;
 	int ret = 0;
 
-	BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
-		     func_id != FFA_FN64_MEM_LEND);
-
 	if (addr_mbz || npages_mbz || fraglen > len ||
 	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
 		ret = FFA_RET_INVALID_PARAMETERS;
@@ -461,6 +458,11 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 		goto out_unlock;
 	}
 
+	if (len > ffa_desc_buf.len) {
+		ret = FFA_RET_NO_MEMORY;
+		goto out_unlock;
+	}
+
 	buf = hyp_buffers.tx;
 	memcpy(buf, host_buffers.tx, fraglen);
 
@@ -512,6 +514,13 @@ err_unshare:
 	goto out_unlock;
 }
 
+#define do_ffa_mem_xfer(fid, res, ctxt)				\
+	do {							\
+		BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE &&	\
+			     (fid) != FFA_FN64_MEM_LEND);	\
+		__do_ffa_mem_xfer((fid), (res), (ctxt));	\
+	} while (0);
+
 static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
 			       struct kvm_cpu_context *ctxt)
 {
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 9e2bbee77491..9788af2ca8c0 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -1547,7 +1547,6 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
 	 */
 	new = kvm_init_table_pte(childp, mm_ops);
 	stage2_make_pte(ctx, new);
-	dsb(ishst);
 
 	return 0;
 }
@@ -1559,8 +1558,11 @@ int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
 		.flags	= KVM_PGTABLE_WALK_LEAF,
 		.arg	= mc,
 	};
+	int ret;
 
-	return kvm_pgtable_walk(pgt, addr, size, &walker);
+	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
+	dsb(ishst);
+	return ret;
 }
 
 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
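
The ffa.c change above relies on BUILD_BUG_ON() needing a condition that
folds to a compile-time constant: once __do_ffa_mem_xfer() is no longer
__always_inline, func_id is not guaranteed constant inside the function
body, so the check moves into a macro that expands at each call site, where
the argument is a literal. A standalone sketch of that pattern, with
simplified names and made-up function IDs (not the kernel's definitions):

    /* Minimal BUILD_BUG_ON: negative array size if cond is true. */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    #define MEM_SHARE 0x73UL	/* illustrative IDs only */
    #define MEM_LEND  0x71UL

    static void __do_mem_xfer(unsigned long func_id)
    {
            (void)func_id;	/* runtime work keyed off func_id */
    }

    /* Check expands at the call site, where fid is a constant. */
    #define do_mem_xfer(fid)				\
            do {					\
                    BUILD_BUG_ON((fid) != MEM_SHARE &&	\
                                 (fid) != MEM_LEND);	\
                    __do_mem_xfer(fid);			\
            } while (0)

    int main(void)
    {
            do_mem_xfer(MEM_SHARE);	/* compiles */
            /* do_mem_xfer(0x99UL); would fail at compile time */
            return 0;
    }

The FF-A handler invokes do_ffa_mem_xfer() with the FFA_FN64_* constants, so
the build-time check still fires even though the function is now out of line.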