}
}
-static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
+static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
unsigned long i;
unlock_rmap(rmapp);
__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
}
- return false;
}
bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
- if (kvm_is_radix(kvm))
- return kvm_unmap_radix(kvm, range->slot, range->start);
+ gfn_t gfn;
+
+ /* Unmap every page in [start, end) for whichever MMU mode is active. */
+ if (kvm_is_radix(kvm)) {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ kvm_unmap_radix(kvm, range->slot, gfn);
+ } else {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ /* Must pass the loop variable, not range->start,
+ * or only the first page of the range gets unmapped. */
+ kvm_unmap_rmapp(kvm, range->slot, gfn);
+ }
- return kvm_unmap_rmapp(kvm, range->slot, range->start);
+ return false;
}
void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
- if (kvm_is_radix(kvm))
- kvm_age_radix(kvm, range->slot, range->start);
+ gfn_t gfn;
+ bool ret = false;
- return kvm_age_rmapp(kvm, range->slot, range->start);
+ /* Age each page in [start, end); report "young" if any single
+ * gfn in the range was referenced. */
+ if (kvm_is_radix(kvm)) {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ ret |= kvm_age_radix(kvm, range->slot, gfn);
+ } else {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ ret |= kvm_age_rmapp(kvm, range->slot, gfn);
+ }
+
+ return ret;
}
static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
- if (kvm_is_radix(kvm))
- kvm_test_age_radix(kvm, range->slot, range->start);
+ /* This callback is expected to cover exactly one page, so only
+ * range->start is consulted below. */
+ WARN_ON(range->start + 1 != range->end);
- return kvm_test_age_rmapp(kvm, range->slot, range->start);
+ if (kvm_is_radix(kvm))
+ return kvm_test_age_radix(kvm, range->slot, range->start);
+ else
+ return kvm_test_age_rmapp(kvm, range->slot, range->start);
}
bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
+ /* This callback is expected to cover exactly one page, so only
+ * range->start is consulted below. */
+ WARN_ON(range->start + 1 != range->end);
+
 if (kvm_is_radix(kvm))
- return kvm_unmap_radix(kvm, range->slot, range->start);
+ kvm_unmap_radix(kvm, range->slot, range->start);
+ else
+ kvm_unmap_rmapp(kvm, range->slot, range->start);
- return kvm_unmap_rmapp(kvm, range->slot, range->start);
+ return false;
}
static int vcpus_running(struct kvm *kvm)
}
/* Called with kvm->mmu_lock held */
-bool kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
pte_t *ptep;
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
- return false;
+ return;
}
ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
kvm->arch.lpid);
- return false;
}
/* Called with kvm->mmu_lock held */