| From 087f5b2e8571304840411ed8ed5453972c778ee1 Mon Sep 17 00:00:00 2001 |
| From: David Stevens <stevensd@chromium.org> |
| Date: Mon, 29 Nov 2021 12:43:16 +0900 |
| Subject: [PATCH] FROMLIST: KVM: arm64/mmu: use gfn_to_pfn_page |
| |
| Convert usages of the deprecated gfn_to_pfn functions to the new |
| gfn_to_pfn_page functions. |
| |
| Signed-off-by: David Stevens <stevensd@chromium.org> |
| (am from https://patchwork.kernel.org/project/kvm/patch/20211129034317.2964790-4-stevensd@google.com/) |
| (also found at https://lore.kernel.org/r/20211129034317.2964790-4-stevensd@google.com) |
| |
| BUG=b:184929592, b:176387875 |
| TEST=tast run zork arc.VideoDecodeAccel.vp9_vm |
| |
| Change-Id: I71b8ab7189fa876841ac891a133cbe5b3133e64c |
| Disallow-Recycled-Builds: test-failures |
| Signed-off-by: Guenter Roeck <groeck@chromium.org> |
| Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/3307494 |
| Reviewed-by: Sean Paul <seanpaul@chromium.org> |
| Reviewed-by: David Stevens <stevensd@chromium.org> |
| --- |
| arch/arm64/kvm/mmu.c | 25 ++++++++++++++++--------- |
| 1 file changed, 16 insertions(+), 9 deletions(-) |
| |
| diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c |
| index 0d19259454d8..731790a35053 100644 |
| --- a/arch/arm64/kvm/mmu.c |
| +++ b/arch/arm64/kvm/mmu.c |
| @@ -961,7 +961,8 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| * sure that the HVA and IPA are sufficiently aligned and that the |
| * block map is contained within the memslot. |
| */ |
| - if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) && |
| + if (*page && |
| + fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) && |
| get_user_mapping_size(kvm, hva) >= PMD_SIZE) { |
| /* |
| * The address we faulted on is backed by a transparent huge |
| @@ -982,10 +983,11 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| * page accordingly. |
| */ |
| *ipap &= PMD_MASK; |
| - kvm_release_pfn_clean(pfn); |
| + put_page(*page); |
| pfn &= ~(PTRS_PER_PMD - 1); |
| - get_page(pfn_to_page(pfn)); |
| *pfnp = pfn; |
| + *page = pfn_to_page(pfn); |
| + get_page(*page); |
| |
| return PMD_SIZE; |
| } |
| @@ -1078,6 +1080,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
| short vma_shift; |
| gfn_t gfn; |
| kvm_pfn_t pfn; |
| + struct page *page; |
| bool logging_active = memslot_is_logging(memslot); |
| bool logging_perm_fault = false; |
| unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu); |
| @@ -1181,8 +1184,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
| */ |
| smp_rmb(); |
| |
| - pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, |
| - write_fault, &writable, NULL); |
| + pfn = __gfn_to_pfn_page_memslot(memslot, gfn, false, NULL, |
| + write_fault, &writable, NULL, &page); |
| if (pfn == KVM_PFN_ERR_HWPOISON) { |
| kvm_send_hwpoison_signal(hva, vma_shift); |
| return 0; |
| @@ -1235,7 +1238,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
| vma_pagesize = fault_granule; |
| else |
| vma_pagesize = transparent_hugepage_adjust(kvm, memslot, |
| - hva, &pfn, |
| + hva, |
| + &pfn, &page, |
| &fault_ipa); |
| } |
| |
| @@ -1275,7 +1279,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
| |
| /* Mark the page dirty only if the fault is handled successfully */ |
| if (writable && !ret) { |
| - kvm_set_pfn_dirty(pfn); |
| + if (page) |
| + kvm_set_pfn_dirty(pfn); |
| mark_page_dirty_in_slot(kvm, memslot, gfn); |
| } |
| |
| @@ -1284,8 +1289,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
| read_unlock(&kvm->mmu_lock); |
| else |
| write_unlock(&kvm->mmu_lock); |
| - kvm_set_pfn_accessed(pfn); |
| - kvm_release_pfn_clean(pfn); |
| + if (page) { |
| + kvm_set_pfn_accessed(pfn); |
| + put_page(page); |
| + } |
| return ret != -EAGAIN ? ret : 0; |
| } |
| |
| -- |
| 2.35.0 |
| |