On Wed, Nov 08, 2023, Nicolas Saenz Julienne wrote:
> Pass the memory attribute array through struct kvm_mmu_notifier_arg and
> use it in kvm_arch_post_set_memory_attributes() instead of defaulting to
> kvm->mem_attr_array.
>
> Signed-off-by: Nicolas Saenz Julienne <nsaenz@amazon.com>
> ---
> arch/x86/kvm/mmu/mmu.c | 8 ++++----
> include/linux/kvm_host.h | 5 ++++-
> virt/kvm/kvm_main.c | 1 +
> 3 files changed, 9 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index c0fd3afd6be5..c2bec2be2ba9 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -7311,6 +7311,7 @@ static bool hugepage_has_attrs(struct xarray *mem_attr_array,
> bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
> struct kvm_gfn_range *range)
> {
> + struct xarray *mem_attr_array = range->arg.mem_attr_array;
> unsigned long attrs = range->arg.attributes;
> struct kvm_memory_slot *slot = range->slot;
> int level;
> @@ -7344,8 +7345,8 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
> * misaligned address regardless of memory attributes.
> */
> if (gfn >= slot->base_gfn) {
> - if (hugepage_has_attrs(&kvm->mem_attr_array,
> - slot, gfn, level, attrs))
> + if (hugepage_has_attrs(mem_attr_array, slot,
> + gfn, level, attrs))
This is wildly broken. The hugepage tracking is per-VM, whereas the attributes
here are per-VTL. I.e., KVM will (dis)allow hugepages based on whichever VTL
last changed its protections.
@@ -7311,6 +7311,7 @@ static bool hugepage_has_attrs(struct xarray *mem_attr_array,
bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range)
{
+ struct xarray *mem_attr_array = range->arg.mem_attr_array;
unsigned long attrs = range->arg.attributes;
struct kvm_memory_slot *slot = range->slot;
int level;
@@ -7344,8 +7345,8 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
* misaligned address regardless of memory attributes.
*/
if (gfn >= slot->base_gfn) {
- if (hugepage_has_attrs(&kvm->mem_attr_array,
- slot, gfn, level, attrs))
+ if (hugepage_has_attrs(mem_attr_array, slot,
+ gfn, level, attrs))
hugepage_clear_mixed(slot, gfn, level);
else
hugepage_set_mixed(slot, gfn, level);
@@ -7367,8 +7368,7 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
*/
if (gfn < range->end &&
(gfn + nr_pages) <= (slot->base_gfn + slot->npages)) {
- if (hugepage_has_attrs(&kvm->mem_attr_array, slot, gfn,
- level, attrs))
+ if (hugepage_has_attrs(mem_attr_array, slot, gfn, level, attrs))
hugepage_clear_mixed(slot, gfn, level);
else
hugepage_set_mixed(slot, gfn, level);
@@ -256,7 +256,10 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
union kvm_mmu_notifier_arg {
pte_t pte;
- unsigned long attributes;
+ struct {
+ unsigned long attributes;
+ struct xarray *mem_attr_array;
+ };
};
struct kvm_gfn_range {
@@ -2569,6 +2569,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
.start = start,
.end = end,
.arg.attributes = attributes,
+ .arg.mem_attr_array = &kvm->mem_attr_array,
.handler = kvm_arch_post_set_memory_attributes,
.on_lock = kvm_mmu_invalidate_end,
.may_block = true,