@@ -213,6 +213,18 @@ extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];
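+/*
+ * Compose a satp value from a kernel PFN, an ASID and a satp mode.
+ * satp.PPN is in units of the hardware base page, so the kernel PFN
+ * is converted with pfn_to_hwpfn() first.
+ */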
+static inline unsigned long make_satp(unsigned long pfn,
+ unsigned long asid, unsigned long satp_mode)
+{
+ return (pfn_to_hwpfn(pfn) |
+ ((asid & SATP_ASID_MASK) << SATP_ASID_SHIFT) | satp_mode);
+}
+
static __always_inline int __pte_present(unsigned long pteval)
{
return (pteval & (_PAGE_PRESENT | _PAGE_PROT_NONE));
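
For reference, here is a minimal host-side sketch of the value make_satp() composes, using the RV64 constants from arch/riscv/include/asm/csr.h and assuming pfn_to_hwpfn() is the identity (kernel page size equal to the 4 KiB hardware page). The root-table address is hypothetical:

    #include <stdio.h>
    #include <stdint.h>

    /* RV64 values copied from arch/riscv/include/asm/csr.h */
    #define SATP_PPN        0x00000FFFFFFFFFFFULL
    #define SATP_ASID_SHIFT 44
    #define SATP_ASID_MASK  0xFFFFULL
    #define SATP_MODE_39    (8ULL << 60)

    /* assumes pfn_to_hwpfn() is a no-op, i.e. 4 KiB kernel pages */
    static uint64_t make_satp(uint64_t pfn, uint64_t asid, uint64_t mode)
    {
            return pfn | ((asid & SATP_ASID_MASK) << SATP_ASID_SHIFT) | mode;
    }

    int main(void)
    {
            uint64_t root_pa = 0x80200000ULL;   /* hypothetical root page table */
            uint64_t satp = make_satp(root_pa >> 12, 1, SATP_MODE_39);

            printf("satp = %#llx\n", (unsigned long long)satp);  /* 0x8000100000080200 */
            printf("ppn  = %#llx\n", (unsigned long long)(satp & SATP_PPN));
            return 0;
    }

Under Sv39 the mode field occupies bits 63:60, the ASID bits 59:44 and the PPN bits 43:0; the three OR terms fill exactly those fields.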
@@ -87,7 +87,7 @@ relocate_enable_mmu:
csrw CSR_TVEC, a2
/* Compute satp for kernel page tables, but don't load it yet */
- srl a2, a0, PAGE_SHIFT
+ srl a2, a0, HW_PAGE_SHIFT
la a1, satp_mode
REG_L a1, 0(a1)
or a2, a2, a1
@@ -100,7 +100,7 @@ relocate_enable_mmu:
*/
la a0, trampoline_pg_dir
XIP_FIXUP_OFFSET a0
- srl a0, a0, PAGE_SHIFT
+ srl a0, a0, HW_PAGE_SHIFT
or a0, a0, a1
sfence.vma
csrw CSR_SATP, a0
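
Neither pfn_to_hwpfn() nor its inverse hwpfn_to_pfn() (used in the vmalloc_fault() hunk below) is defined in the hunks shown. A plausible reading, assuming the point of the series is to let the kernel PAGE_SHIFT exceed the 12-bit shift of the 4 KiB hardware base page, is a pair of shifts; the values below are illustrative assumptions, not taken from the patch:

    #include <stdio.h>

    /* assumed for illustration: 64 KiB kernel pages over 4 KiB hardware pages */
    #define HW_PAGE_SHIFT   12
    #define PAGE_SHIFT      16

    static unsigned long pfn_to_hwpfn(unsigned long pfn)
    {
            /* kernel PFN (PAGE_SIZE units) -> satp PPN (hardware-page units) */
            return pfn << (PAGE_SHIFT - HW_PAGE_SHIFT);
    }

    static unsigned long hwpfn_to_pfn(unsigned long hwpfn)
    {
            /* inverse direction, for reading CSR_SATP back */
            return hwpfn >> (PAGE_SHIFT - HW_PAGE_SHIFT);
    }

    int main(void)
    {
            unsigned long pa = 0x80200000UL;    /* PAGE_SIZE-aligned root table */

            /* pa >> HW_PAGE_SHIFT (head.S) == pfn_to_hwpfn(pa >> PAGE_SHIFT) */
            printf("%lx %lx\n", pa >> HW_PAGE_SHIFT,
                   pfn_to_hwpfn(pa >> PAGE_SHIFT));
            printf("%lx\n", hwpfn_to_pfn(pa >> HW_PAGE_SHIFT));
            return 0;
    }

With that reading the assembly change above is consistent with the C helper: for a PAGE_SIZE-aligned root table, pa >> HW_PAGE_SHIFT equals pfn_to_hwpfn(PFN_DOWN(pa)), so head.S and make_satp() compute the same PPN.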
@@ -395,7 +395,8 @@ int swsusp_arch_resume(void)
if (ret)
return ret;
- hibernate_restore_image(resume_hdr.saved_satp, (PFN_DOWN(__pa(resume_pg_dir)) | satp_mode),
+ hibernate_restore_image(resume_hdr.saved_satp,
+ make_satp(PFN_DOWN(__pa(resume_pg_dir)), 0, satp_mode),
resume_hdr.restore_cpu_addr);
return 0;
@@ -190,9 +190,8 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
raw_spin_unlock_irqrestore(&context_lock, flags);
switch_mm_fast:
- csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
- ((cntx & asid_mask) << SATP_ASID_SHIFT) |
- satp_mode);
+ csr_write(CSR_SATP, make_satp(virt_to_pfn(mm->pgd), (cntx & asid_mask),
+ satp_mode));
if (need_flush_tlb)
local_flush_tlb_all();
@@ -201,7 +200,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
static void set_mm_noasid(struct mm_struct *mm)
{
/* Switch the page table and blindly nuke entire local TLB */
- csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | satp_mode);
+ csr_write(CSR_SATP, make_satp(virt_to_pfn(mm->pgd), 0, satp_mode));
local_flush_tlb_all();
}
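
The conversion is intended to be behaviour-preserving on a 4 KiB-page kernel: with asid == 0 and an identity pfn_to_hwpfn(), make_satp() reduces to the old open-coded pfn | satp_mode. A quick self-contained check of that claim, reusing the RV64 constants and a hypothetical PFN:

    #include <assert.h>
    #include <stdint.h>

    #define SATP_ASID_SHIFT 44
    #define SATP_ASID_MASK  0xFFFFULL
    #define SATP_MODE_39    (8ULL << 60)

    /* identity pfn_to_hwpfn(), i.e. 4 KiB kernel pages */
    static uint64_t make_satp(uint64_t pfn, uint64_t asid, uint64_t mode)
    {
            return pfn | ((asid & SATP_ASID_MASK) << SATP_ASID_SHIFT) | mode;
    }

    int main(void)
    {
            uint64_t pfn = 0x80200;    /* hypothetical root-table PFN */

            /* new set_mm_noasid() path vs. the old open-coded expression */
            assert(make_satp(pfn, 0, SATP_MODE_39) == (pfn | SATP_MODE_39));
            return 0;
    }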
@@ -133,6 +133,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
*/
index = pgd_index(addr);
pfn = csr_read(CSR_SATP) & SATP_PPN;
+ pfn = hwpfn_to_pfn(pfn); /* satp.PPN counts hardware pages, not kernel pages */
pgd = (pgd_t *)pfn_to_virt(pfn) + index;
pgd_k = init_mm.pgd + index;
@@ -805,7 +805,7 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
(uintptr_t)early_p4d : (uintptr_t)early_pud,
PGDIR_SIZE, PAGE_TABLE);
- identity_satp = PFN_DOWN((uintptr_t)&early_pg_dir) | satp_mode;
+ identity_satp = make_satp(PFN_DOWN((uintptr_t)&early_pg_dir), 0, satp_mode);
local_flush_tlb_all();
csr_write(CSR_SATP, identity_satp);
@@ -1285,6 +1285,8 @@ static void __init create_linear_mapping_page_table(void)
static void __init setup_vm_final(void)
{
+ unsigned long satp;
+
/* Setup swapper PGD for fixmap */
#if !defined(CONFIG_64BIT)
/*
@@ -1318,7 +1320,8 @@ static void __init setup_vm_final(void)
clear_fixmap(FIX_P4D);
/* Move to swapper page table */
- csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | satp_mode);
+ satp = make_satp(PFN_DOWN(__pa_symbol(swapper_pg_dir)), 0, satp_mode);
+ csr_write(CSR_SATP, satp);
local_flush_tlb_all();
pt_ops_set_late();
@@ -471,11 +471,13 @@ static void __init create_tmp_mapping(void)
void __init kasan_init(void)
{
+ unsigned long satp;
phys_addr_t p_start, p_end;
u64 i;
create_tmp_mapping();
- csr_write(CSR_SATP, PFN_DOWN(__pa(tmp_pg_dir)) | satp_mode);
+ satp = make_satp(PFN_DOWN(__pa(tmp_pg_dir)), 0, satp_mode);
+ csr_write(CSR_SATP, satp);
kasan_early_clear_pgd(pgd_offset_k(KASAN_SHADOW_START),
KASAN_SHADOW_START, KASAN_SHADOW_END);
@@ -520,6 +522,7 @@ void __init kasan_init(void)
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
init_task.kasan_depth = 0;
- csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | satp_mode);
+ satp = make_satp(PFN_DOWN(__pa(swapper_pg_dir)), 0, satp_mode);
+ csr_write(CSR_SATP, satp);
local_flush_tlb_all();
}
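
Taken together, every satp write in C now funnels through make_satp(), leaving only the early assembly in head.S composing the value by hand; a future change to the PPN or ASID encoding therefore has two places to touch instead of a scattering of open-coded expressions.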