KVM: do not treat noslot pfn as an error pfn
This patch filters noslot pfns out of the error pfns, following Marcelo's comment: a noslot pfn is not an error pfn.

After this patch:

- is_noslot_pfn indicates that the gfn is not in any memslot
- is_error_pfn indicates that the gfn is in a memslot, but an error occurred while translating the gfn to a pfn
- is_error_noslot_pfn indicates that the pfn is either an error pfn or a noslot pfn

is_invalid_pfn can then be removed, which makes the code cleaner.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
commit 81c52c56e2
parent 19bf7f8ac3
9 changed files with 32 additions and 20 deletions
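The distinction the commit message draws between the three predicates can be illustrated with a minimal, self-contained sketch. The constants and predicate bodies below mirror the new definitions in the include/linux/kvm_host.h hunk further down; the userspace harness around them (the pfn_t typedef, main() and the sample values) is purely illustrative and not part of the patch.

/*
 * Minimal userspace sketch (not part of the patch): how the three
 * predicates classify a pfn after this change.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pfn_t;

#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)	/* bits 62..52: error pfn */
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)	/* bits 63..52: error or noslot */
#define KVM_PFN_NOSLOT		(0x1ULL << 63)		/* bit 63 alone: gfn not in any slot */
#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)

static bool is_error_pfn(pfn_t pfn)        { return !!(pfn & KVM_PFN_ERR_MASK); }
static bool is_noslot_pfn(pfn_t pfn)       { return pfn == KVM_PFN_NOSLOT; }
static bool is_error_noslot_pfn(pfn_t pfn) { return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK); }

int main(void)
{
	/* an ordinary pfn, an in-slot translation failure, and a missing memslot */
	pfn_t samples[] = { 0x1234, KVM_PFN_ERR_FAULT, KVM_PFN_NOSLOT };

	for (unsigned i = 0; i < 3; i++)
		printf("pfn %#llx: error=%d noslot=%d error_noslot=%d\n",
		       (unsigned long long)samples[i],
		       is_error_pfn(samples[i]),
		       is_noslot_pfn(samples[i]),
		       is_error_noslot_pfn(samples[i]));
	return 0;
}

Only the noslot value satisfies is_noslot_pfn, only the fault value satisfies is_error_pfn, and is_error_noslot_pfn covers both abnormal cases.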
@@ -155,7 +155,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 
 	/* Get host physical address for gpa */
 	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
-	if (is_error_pfn(hpaddr)) {
+	if (is_error_noslot_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 			orig_pte->eaddr);
 		r = -EINVAL;
@@ -93,7 +93,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 
 	/* Get host physical address for gpa */
 	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
-	if (is_error_pfn(hpaddr)) {
+	if (is_error_noslot_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
 		r = -EINVAL;
 		goto out;
@@ -524,7 +524,7 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	if (likely(!pfnmap)) {
 		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
 		pfn = gfn_to_pfn_memslot(slot, gfn);
-		if (is_error_pfn(pfn)) {
+		if (is_error_noslot_pfn(pfn)) {
 			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
 				(long)gfn);
 			return;
@@ -2699,7 +2699,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
 	 * here.
 	 */
-	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
 	    PageTransCompound(pfn_to_page(pfn)) &&
 	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
@@ -2733,7 +2733,7 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 	bool ret = true;
 
 	/* The pfn is invalid, report the error! */
-	if (unlikely(is_invalid_pfn(pfn))) {
+	if (unlikely(is_error_pfn(pfn))) {
 		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
 		goto exit;
 	}
@@ -323,7 +323,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	protect_clean_gpte(&pte_access, gpte);
 	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 			no_dirty_log && (pte_access & ACC_WRITE_MASK));
-	if (is_invalid_pfn(pfn))
+	if (is_error_pfn(pfn))
 		return false;
 
 	/*
@@ -4504,7 +4504,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
 	 * instruction -> ...
 	 */
 	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
-	if (!is_error_pfn(pfn)) {
+	if (!is_error_noslot_pfn(pfn)) {
 		kvm_release_pfn_clean(pfn);
 		return true;
 	}
@@ -58,28 +58,40 @@
 
 /*
  * For the normal pfn, the highest 12 bits should be zero,
- * so we can mask these bits to indicate the error.
+ * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
+ * mask bit 63 to indicate the noslot pfn.
  */
-#define KVM_PFN_ERR_MASK	(0xfffULL << 52)
+#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
+#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
+#define KVM_PFN_NOSLOT		(0x1ULL << 63)
 
 #define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
 #define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
-#define KVM_PFN_ERR_BAD		(KVM_PFN_ERR_MASK + 2)
-#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 3)
+#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)
 
+/*
+ * error pfns indicate that the gfn is in slot but faild to
+ * translate it to pfn on host.
+ */
 static inline bool is_error_pfn(pfn_t pfn)
 {
 	return !!(pfn & KVM_PFN_ERR_MASK);
 }
 
-static inline bool is_noslot_pfn(pfn_t pfn)
+/*
+ * error_noslot pfns indicate that the gfn can not be
+ * translated to pfn - it is not in slot or failed to
+ * translate it to pfn.
+ */
+static inline bool is_error_noslot_pfn(pfn_t pfn)
 {
-	return pfn == KVM_PFN_ERR_BAD;
+	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
 }
 
-static inline bool is_invalid_pfn(pfn_t pfn)
+/* noslot pfn indicates that the gfn is not in slot. */
+static inline bool is_noslot_pfn(pfn_t pfn)
 {
-	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
+	return pfn == KVM_PFN_NOSLOT;
 }
 
 #define KVM_HVA_ERR_BAD	(PAGE_OFFSET)
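With the split encoding above, any bit in 62..52 flags an in-slot translation failure, bit 63 alone flags a gfn with no memslot, and is_error_noslot_pfn() covers both by testing bits 63..52. A hedged caller sketch of how a fault path might branch on the two cases; the classify_pfn() function and its return codes are hypothetical and not part of the patch, and it assumes the pfn_t type and predicates defined in the hunk above.

/* Hypothetical sketch (not in this patch), reusing the predicates above. */
static int classify_pfn(pfn_t pfn)
{
	if (!is_error_noslot_pfn(pfn))
		return 0;	/* real host page: safe to pass to pfn_to_page() */

	if (is_noslot_pfn(pfn))
		return 1;	/* gfn is not covered by any memslot (e.g. MMIO) */

	return 2;		/* gfn is in a memslot, but translating it to a pfn failed */
}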
@@ -52,7 +52,7 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
 	end_gfn = gfn + (size >> PAGE_SHIFT);
 	gfn += 1;
 
-	if (is_error_pfn(pfn))
+	if (is_error_noslot_pfn(pfn))
 		return pfn;
 
 	while (gfn < end_gfn)
@@ -106,7 +106,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 		 * important because we unmap and unpin in 4kb steps later.
 		 */
 		pfn = kvm_pin_pages(slot, gfn, page_size);
-		if (is_error_pfn(pfn)) {
+		if (is_error_noslot_pfn(pfn)) {
 			gfn += 1;
 			continue;
 		}
@@ -1208,7 +1208,7 @@ __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
 		return KVM_PFN_ERR_RO_FAULT;
 
 	if (kvm_is_error_hva(addr))
-		return KVM_PFN_ERR_BAD;
+		return KVM_PFN_NOSLOT;
 
 	/* Do not map writable pfn in the readonly memslot. */
 	if (writable && memslot_is_readonly(slot)) {
@@ -1290,7 +1290,7 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 
 static struct page *kvm_pfn_to_page(pfn_t pfn)
 {
-	if (is_error_pfn(pfn))
+	if (is_error_noslot_pfn(pfn))
 		return KVM_ERR_PTR_BAD_PAGE;
 
 	if (kvm_is_mmio_pfn(pfn)) {
@@ -1322,7 +1322,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);