[02/12] m68k: Pass a pointer to virt_to_pfn() virt_to_page()

Message ID 20230503-virt-to-pfn-v6-4-rc1-v1-2-6c4698dcf9c8@linaro.org
State New
Series arch: Make virt_to_pfn into a static inline

Commit Message

Linus Walleij May 11, 2023, 11:59 a.m. UTC
  Functions that work on a pointer to virtual memory, such as
virt_to_pfn() and its users such as virt_to_page(), are
supposed to be passed a pointer to virtual memory, ideally
a (void *) or another pointer type. However, since many
architectures implement virt_to_pfn() as a macro, it becomes
polymorphic and accepts both an (unsigned long) and a
(void *).

Fix up the offending calls in arch/m68k with explicit casts.

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
---
ChangeLog v1->v2:
- Add an extra parens around the page argument to the
  PD_PTABLE() macro, as is normally required.
---
 arch/m68k/mm/motorola.c | 4 ++--
 arch/m68k/mm/sun3mmu.c  | 2 +-
 arch/m68k/sun3/dvma.c   | 2 +-
 arch/m68k/sun3x/dvma.c  | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)
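
The type issue being addressed can be sketched as follows (an
illustrative stand-in, not the kernel's actual definitions; the
PAGE_* values below are placeholders):

/* Illustrative sketch only -- simplified stand-ins, not kernel code. */
#define PAGE_SHIFT	12
#define PAGE_OFFSET	0x0UL	/* placeholder; arch-specific in reality */

/*
 * Macro form: expands textually, so it silently accepts both an
 * (unsigned long) and a (void *) argument -- no type checking.
 */
#define virt_to_pfn_macro(kaddr) \
	(((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT)

/*
 * static inline form (the direction of this series): the prototype
 * documents the expected type, and the compiler warns when a caller
 * passes an (unsigned long) instead of a pointer -- which is why the
 * callers patched below need explicit (void *) casts.
 */
static inline unsigned long virt_to_pfn_inline(const void *kaddr)
{
	return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT;
}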
  

Comments

Geert Uytterhoeven May 12, 2023, 9:55 a.m. UTC | #1
Hi Linus,

On Thu, May 11, 2023 at 1:59 PM Linus Walleij <linus.walleij@linaro.org> wrote:
> Functions that work on a pointer to virtual memory, such as
> virt_to_pfn() and its users such as virt_to_page(), are
> supposed to be passed a pointer to virtual memory, ideally
> a (void *) or another pointer type. However, since many
> architectures implement virt_to_pfn() as a macro, it becomes
> polymorphic and accepts both an (unsigned long) and a
> (void *).
>
> Fix up the offending calls in arch/m68k with explicit casts.
>
> Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
> ---
> ChangeLog v1->v2:
> - Add an extra parens around the page argument to the
>   PD_PTABLE() macro, as is normally required.

Thanks for the update!

To build sun3_defconfig and m5475evb_defconfig cleanly, you need to
include the (Gmail-whitespace-damaged) changes below.
These were compile-tested only.

diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 96d069829803505c..46ae379bb14d5e05 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -135,7 +135,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 }

 #define pte_pagenr(pte)        ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
-#define pte_page(pte)  virt_to_page(__pte_page(pte))
+#define pte_page(pte)  virt_to_page((void *)__pte_page(pte))

 static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
 #define pmd_none(pmd) pmd_none2(&(pmd))
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index e582b0484a55cd82..f3e7728f58cd9dd0 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -109,9 +109,9 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p
 #define pfn_pte(pfn, pgprot) \
 ({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; })

-#define pte_page(pte)          virt_to_page(__pte_page(pte))
+#define pte_page(pte)          virt_to_page((void *)__pte_page(pte))
 #define pmd_pfn(pmd)           (pmd_val(pmd) >> PAGE_SHIFT)
-#define pmd_page(pmd)          virt_to_page(pmd_page_vaddr(pmd))
+#define pmd_page(pmd)          virt_to_page((void *)pmd_page_vaddr(pmd))


 static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 70aa0979e02710a8..a4c552c7e2c8ca12 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -69,7 +69,7 @@ void __init paging_init(void)

                /* now change pg_table to kernel virtual addresses */
                for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
-                       pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+                       pte_t pte = pfn_pte(virt_to_pfn((void *)address), PAGE_INIT);
                        if (address >= (unsigned long) high_memory)
                                pte_val(pte) = 0;

Gr{oetje,eeting}s,

                        Geert
  
Geert Uytterhoeven May 12, 2023, 10:26 a.m. UTC | #2
On Fri, May 12, 2023 at 11:55 AM Geert Uytterhoeven
<geert@linux-m68k.org> wrote:
> On Thu, May 11, 2023 at 1:59 PM Linus Walleij <linus.walleij@linaro.org> wrote:
> > Functions that work on a pointer to virtual memory, such as
> > virt_to_pfn() and its users such as virt_to_page(), are
> > supposed to be passed a pointer to virtual memory, ideally
> > a (void *) or another pointer type. However, since many
> > architectures implement virt_to_pfn() as a macro, it becomes
> > polymorphic and accepts both an (unsigned long) and a
> > (void *).
> >
> > Fix up the offending calls in arch/m68k with explicit casts.
> >
> > Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
> > ---
> > ChangeLog v1->v2:
> > - Add an extra parens around the page argument to the
> >   PD_PTABLE() macro, as is normally required.
>
> Thanks for the update!
>
> To build sun3_defconfig and m5475evb_defconfig cleanly, you need to
> include the (Gmail-whitespace-damaged) changes below.
> These were compile-tested only.

> --- a/arch/m68k/include/asm/sun3_pgtable.h
> +++ b/arch/m68k/include/asm/sun3_pgtable.h
> @@ -109,9 +109,9 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p
>  #define pfn_pte(pfn, pgprot) \
>  ({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; })
>
> -#define pte_page(pte)          virt_to_page(__pte_page(pte))
> +#define pte_page(pte)          virt_to_page((void *)__pte_page(pte))

Much simpler to drop the cast in __pte_page() instead:

@@ -91,7 +91,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pmd_set(pmdp,ptep) do {} while (0)

 #define __pte_page(pte) \
-((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
+        (__va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))

 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {

>  #define pmd_pfn(pmd)           (pmd_val(pmd) >> PAGE_SHIFT)
> -#define pmd_page(pmd)          virt_to_page(pmd_page_vaddr(pmd))
> +#define pmd_page(pmd)          virt_to_page((void *)pmd_page_vaddr(pmd))

Gr{oetje,eeting}s,

                        Geert
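
Geert's alternative keeps the pointer type that __va() already
produces instead of casting it to unsigned long and back; a minimal
standalone sketch of the same pattern (generic stand-ins, not the
sun3 headers themselves):

/* Illustrative sketch only -- generic stand-ins, not the sun3 headers. */
#include <stdio.h>

static char fake_ram[4096];		/* pretend physical memory window */

#define __va_stub(off)	((void *)&fake_ram[(off)])	/* phys -> virt stub */

static void want_pointer(const void *kaddr)	/* stands in for virt_to_page() */
{
	printf("virtual address %p\n", kaddr);
}

/*
 * With the (unsigned long) cast the pointer type is thrown away, so
 * every pointer consumer has to cast the value back.
 */
#define page_vaddr_cast(off)	((unsigned long)__va_stub(off))

/*
 * Without the cast (the suggestion above), the void * from __va_stub()
 * flows straight into pointer consumers with no extra cast.
 */
#define page_vaddr_plain(off)	(__va_stub(off))

int main(void)
{
	want_pointer((void *)page_vaddr_cast(16));	/* cast back required */
	want_pointer(page_vaddr_plain(16));		/* no cast needed */
	return 0;
}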
  
Linus Walleij May 16, 2023, 9:18 a.m. UTC | #3
On Fri, May 12, 2023 at 12:26 PM Geert Uytterhoeven
<geert@linux-m68k.org> wrote:

> Much simpler to drop the cast in __pte_page() instead:
>
> @@ -91,7 +91,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
>  #define pmd_set(pmdp,ptep) do {} while (0)
>
>  #define __pte_page(pte) \
> -((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
> +        (__va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))

Thanks, folded this into the patch!

Yours,
Linus Walleij
  

Patch

diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 911301224078..c75984e2d86b 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -102,7 +102,7 @@  static struct list_head ptable_list[2] = {
 	LIST_HEAD_INIT(ptable_list[1]),
 };
 
-#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
+#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
 #define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
 
@@ -201,7 +201,7 @@  int free_pointer_table(void *table, int type)
 		list_del(dp);
 		mmu_page_dtor((void *)page);
 		if (type == TABLE_PTE)
-			pgtable_pte_page_dtor(virt_to_page(page));
+			pgtable_pte_page_dtor(virt_to_page((void *)page));
 		free_page (page);
 		return 1;
 	} else if (ptable_list[type].next != dp) {
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index b619d0d4319c..c5e6a23e0262 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -75,7 +75,7 @@  void __init paging_init(void)
 		/* now change pg_table to kernel virtual addresses */
 		pg_table = (pte_t *) __va ((unsigned long) pg_table);
 		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
-			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+			pte_t pte = pfn_pte(virt_to_pfn((void *)address), PAGE_INIT);
 			if (address >= (unsigned long)high_memory)
 				pte_val (pte) = 0;
 			set_pte (pg_table, pte);
diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c
index f15ff16b9997..83fcae6a0e79 100644
--- a/arch/m68k/sun3/dvma.c
+++ b/arch/m68k/sun3/dvma.c
@@ -29,7 +29,7 @@  static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
 	j = *(volatile unsigned long *)kaddr;
 	*(volatile unsigned long *)kaddr = j;
 
-	ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
+	ptep = pfn_pte(virt_to_pfn((void *)kaddr), PAGE_KERNEL);
 	pte = pte_val(ptep);
 //	pr_info("dvma_remap: addr %lx -> %lx pte %08lx\n", kaddr, vaddr, pte);
 	if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
diff --git a/arch/m68k/sun3x/dvma.c b/arch/m68k/sun3x/dvma.c
index 08bb92113026..a6034ba05845 100644
--- a/arch/m68k/sun3x/dvma.c
+++ b/arch/m68k/sun3x/dvma.c
@@ -125,7 +125,7 @@  inline int dvma_map_cpu(unsigned long kaddr,
 			do {
 				pr_debug("mapping %08lx phys to %08lx\n",
 					 __pa(kaddr), vaddr);
-				set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
+				set_pte(pte, pfn_pte(virt_to_pfn((void *)kaddr),
 						     PAGE_KERNEL));
 				pte++;
 				kaddr += PAGE_SIZE;