LoongArch: mm: Fix huge page entry update for virtual machine

Message ID 20221207141043.1600863-1-chenhuacai@loongson.cn
State New
Headers
Series LoongArch: mm: Fix huge page entry update for virtual machine |

Commit Message

Huacai Chen Dec. 7, 2022, 2:10 p.m. UTC
  In virtual machine (guest mode), the tlbwr instruction cannot write the
last entry of the MTLB, so we need to make the entry non-present with
invtlb and then write it with tlbfill. This also simplifies the whole logic.

Signed-off-by: Rui Wang <wangrui@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
 arch/loongarch/mm/tlbex.S | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)
  

Patch

diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index d8ee8fbc8c67..58781c6e4191 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -10,6 +10,8 @@ 
 #include <asm/regdef.h>
 #include <asm/stackframe.h>
 
+#define INVTLB_ADDR_GFALSE_AND_ASID	5
+
 #define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
 #define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
 #define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
@@ -136,13 +138,10 @@  tlb_huge_update_load:
 	ori		t0, ra, _PAGE_VALID
 	st.d		t0, t1, 0
 #endif
-	tlbsrch
-	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
-	addi.d		ra, t1, 0
-	csrxchg		ra, t1, LOONGARCH_CSR_TLBIDX
-	tlbwr
-
-	csrxchg		zero, t1, LOONGARCH_CSR_TLBIDX
+	csrrd		ra, LOONGARCH_CSR_ASID
+	csrrd		t1, LOONGARCH_CSR_BADV
+	andi		ra, ra, CSR_ASID_ASID
+	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
 
 	/*
 	 * A huge PTE describes an area the size of the
@@ -287,13 +286,11 @@  tlb_huge_update_store:
 	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 	st.d		t0, t1, 0
 #endif
-	tlbsrch
-	addu16i.d	t1, zero, -(CSR_TLBIDX_EHINV >> 16)
-	addi.d		ra, t1, 0
-	csrxchg		ra, t1, LOONGARCH_CSR_TLBIDX
-	tlbwr
+	csrrd		ra, LOONGARCH_CSR_ASID
+	csrrd		t1, LOONGARCH_CSR_BADV
+	andi		ra, ra, CSR_ASID_ASID
+	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
 
-	csrxchg		zero, t1, LOONGARCH_CSR_TLBIDX
 	/*
 	 * A huge PTE describes an area the size of the
 	 * configured huge page size. This is twice the
@@ -436,6 +433,11 @@  tlb_huge_update_modify:
 	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
 	st.d		t0, t1, 0
 #endif
+	csrrd		ra, LOONGARCH_CSR_ASID
+	csrrd		t1, LOONGARCH_CSR_BADV
+	andi		ra, ra, CSR_ASID_ASID
+	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
+
 	/*
 	 * A huge PTE describes an area the size of the
 	 * configured huge page size. This is twice the
@@ -466,7 +468,7 @@  tlb_huge_update_modify:
 	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
 	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX
 
-	tlbwr
+	tlbfill
 
 	/* Reset default page size */
 	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)