[v3,02/12] m68k: Pass a pointer to virt_to_pfn() virt_to_page()
Commit Message
Functions that work on a pointer to virtual memory, such as
virt_to_pfn() and users of that function such as virt_to_page(),
are supposed to be passed a pointer to virtual memory, ideally
a (void *) or other pointer type. However, since many
architectures implement virt_to_pfn() as a macro, the function
becomes polymorphic and accepts both an (unsigned long) and
a (void *).
Fix up the offending calls in arch/m68k with explicit casts.
The page table header <asm/pgtable.h> pulls in different
variants of these defines depending on whether the build targets
classic m68k, ColdFire or Sun3, so fix all variants.
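For illustration, a minimal sketch of why the macro form is
polymorphic (simplified, hypothetical definitions, not the actual
m68k ones):

/* Macro form: the expression compiles whether kaddr is an
 * (unsigned long) or a (void *), so mistyped callers build
 * silently. */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)

/* Inline form: an (unsigned long) argument now draws a compiler
 * warning, which is what makes the explicit casts in this patch
 * necessary. */
static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return __pa(kaddr) >> PAGE_SHIFT;
}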
Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
---
ChangeLog v2->v3:
- Fix up versioning. This is v3.
- Let the ColdFire __pte_page() return a (void *) instead of an
  (unsigned long)
- Delete the ColdFire pte_pagenr(), which relied on the
  unsigned long semantics of __pte_page()
- Drop the ill-advised change to the ColdFire pmd_page_vaddr()
ChangeLog v1->v2:
- Fix the sun3 pgtable macro to not cast to unsigned long.
- Make a similar change to the ColdFire include.
- Add extra parentheses around the page argument to the
  PD_PTABLE() macro, as is normally required for macro
  arguments.
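The extra parentheses matter because the macro argument may be an
expression; a generic macro-hygiene sketch (not taken from the
patch):

/* Unparenthesized: the cast binds only to the first operand. */
#define PD_BAD(page)	((ptable_desc *)&(virt_to_page((void *)page)->lru))
/* PD_BAD(addr + off) expands to virt_to_page((void *)addr + off) */

/* Parenthesized: the cast covers the whole argument. */
#define PD_PTABLE(page)	((ptable_desc *)&(virt_to_page((void *)(page))->lru))
/* PD_PTABLE(addr + off) expands to virt_to_page((void *)(addr + off)) */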
---
arch/m68k/include/asm/mcf_pgtable.h | 3 +--
arch/m68k/include/asm/sun3_pgtable.h | 4 ++--
arch/m68k/mm/mcfmmu.c | 3 ++-
arch/m68k/mm/motorola.c | 4 ++--
arch/m68k/mm/sun3mmu.c | 2 +-
arch/m68k/sun3/dvma.c | 2 +-
arch/m68k/sun3x/dvma.c | 2 +-
7 files changed, 10 insertions(+), 10 deletions(-)
Comments
Hi Linus,
On Tue, May 23, 2023 at 4:05 PM Linus Walleij <linus.walleij@linaro.org> wrote:
> Functions that work on a pointer to virtual memory, such as
> virt_to_pfn() and users of that function such as virt_to_page(),
> are supposed to be passed a pointer to virtual memory, ideally
> a (void *) or other pointer type. However, since many
> architectures implement virt_to_pfn() as a macro, the function
> becomes polymorphic and accepts both an (unsigned long) and
> a (void *).
>
> Fix up the offending calls in arch/m68k with explicit casts.
>
> The page table header <asm/pgtable.h> pulls in different
> variants of these defines depending on whether the build targets
> classic m68k, ColdFire or Sun3, so fix all variants.
>
> Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
> Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Thanks for your patch!
> ---
> ChangeLog v2->v3:
> - Fix up versioning. This is v3.
> - Let the ColdFire __pte_page() return a (void *) instead of an
>   (unsigned long)
> - Delete the ColdFire pte_pagenr(), which relied on the
>   unsigned long semantics of __pte_page()
You may want to mention this removal in the patch description.
> - Drop the ill-advised change to the ColdFire pmd_page_vaddr()
> --- a/arch/m68k/include/asm/sun3_pgtable.h
> +++ b/arch/m68k/include/asm/sun3_pgtable.h
> @@ -111,7 +111,7 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p
>
> #define pte_page(pte) virt_to_page(__pte_page(pte))
> #define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
> -#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
> +#define pmd_page(pmd) virt_to_page((void  *)pmd_page_vaddr(pmd))
There's an extra space between "void" and "*".
Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Gr{oetje,eeting}s,
Geert
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -115,7 +115,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
pgd_val(*pgdp) = virt_to_phys(pmdp);
}
-#define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK))
+#define __pte_page(pte) ((void *) (pte_val(pte) & PAGE_MASK))
#define pmd_page_vaddr(pmd) ((unsigned long) (pmd_val(pmd)))
static inline int pte_none(pte_t pte)
@@ -134,7 +134,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_val(*ptep) = 0;
}
-#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
#define pte_page(pte) virt_to_page(__pte_page(pte))
static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
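The deleted pte_pagenr() open-coded the virtual-address-to-pfn
conversion. Assuming the usual linear kernel mapping (a sketch,
not part of the patch), the same value now falls out of
virt_to_pfn() applied to the (void *) that __pte_page() returns:

	/* old, (unsigned long) arithmetic: */
	(__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT

	/* equivalent with the new (void *) __pte_page(): */
	virt_to_pfn(__pte_page(pte))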
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -91,7 +91,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pmd_set(pmdp,ptep) do {} while (0)
#define __pte_page(pte) \
-((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
+(__va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
@@ -111,7 +111,7 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p
#define pte_page(pte) virt_to_page(__pte_page(pte))
#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
-#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
+#define pmd_page(pmd) virt_to_page((void  *)pmd_page_vaddr(pmd))
static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
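Note that __va() already yields a pointer, so dropping the
(unsigned long) cast lets pte_page() hand virt_to_page() a
(void *) directly. For reference, the generic form of __va()
(the m68k variant may differ in detail):

#define __va(x)	((void *)((unsigned long)(x) + PAGE_OFFSET))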
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -69,7 +69,8 @@ void __init paging_init(void)
/* now change pg_table to kernel virtual addresses */
for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
- pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+ pte_t pte = pfn_pte(virt_to_pfn((void *)address),
+ PAGE_INIT);
if (address >= (unsigned long) high_memory)
pte_val(pte) = 0;
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -102,7 +102,7 @@ static struct list_head ptable_list[2] = {
LIST_HEAD_INIT(ptable_list[1]),
};
-#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
+#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
@@ -201,7 +201,7 @@ int free_pointer_table(void *table, int type)
list_del(dp);
mmu_page_dtor((void *)page);
if (type == TABLE_PTE)
- pgtable_pte_page_dtor(virt_to_page(page));
+ pgtable_pte_page_dtor(virt_to_page((void *)page));
free_page (page);
return 1;
} else if (ptable_list[type].next != dp) {
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -75,7 +75,7 @@ void __init paging_init(void)
/* now change pg_table to kernel virtual addresses */
pg_table = (pte_t *) __va ((unsigned long) pg_table);
for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
- pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+ pte_t pte = pfn_pte(virt_to_pfn((void *)address), PAGE_INIT);
if (address >= (unsigned long)high_memory)
pte_val (pte) = 0;
set_pte (pg_table, pte);
--- a/arch/m68k/sun3/dvma.c
+++ b/arch/m68k/sun3/dvma.c
@@ -29,7 +29,7 @@ static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
j = *(volatile unsigned long *)kaddr;
*(volatile unsigned long *)kaddr = j;
- ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
+ ptep = pfn_pte(virt_to_pfn((void *)kaddr), PAGE_KERNEL);
pte = pte_val(ptep);
// pr_info("dvma_remap: addr %lx -> %lx pte %08lx\n", kaddr, vaddr, pte);
if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
--- a/arch/m68k/sun3x/dvma.c
+++ b/arch/m68k/sun3x/dvma.c
@@ -125,7 +125,7 @@ inline int dvma_map_cpu(unsigned long kaddr,
do {
pr_debug("mapping %08lx phys to %08lx\n",
__pa(kaddr), vaddr);
- set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
+ set_pte(pte, pfn_pte(virt_to_pfn((void *)kaddr),
PAGE_KERNEL));
pte++;
kaddr += PAGE_SIZE;
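In both DVMA paths kaddr is carried as an (unsigned long) so it
can be advanced by PAGE_SIZE, and the cast converts it only at
the virt_to_pfn() boundary. A simplified, hypothetical sketch of
the resulting caller-side pattern (map_range and its parameters
are made up for illustration):

static void map_range(pte_t *pte, unsigned long kaddr, int num_pages)
{
	while (num_pages--) {
		/* cast at the boundary: virt_to_pfn() takes a pointer */
		set_pte(pte++, pfn_pte(virt_to_pfn((void *)kaddr),
				       PAGE_KERNEL));
		kaddr += PAGE_SIZE;
	}
}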