s390/mm: Make virt_to_pfn() a static inline
Commit Message
Making virt_to_pfn() a static inline taking a strongly typed
(const void *) makes the contract of passing a pointer of that
type to the function explicit and exposes any misuse of the
macro virt_to_pfn() acting polymorphic and accepting many types
such as (void *), (uintptr_t) or (unsigned long) as arguments
without warnings.
For symmetry do the same with pfn_to_virt() reflecting the
current layout in asm-generic/page.h.
Doing this reveals a number of offenders in the arch code and
the S390-specific drivers, so just bite the bullet and fix up
all of those as well.
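For illustration only, a compilable sketch of what the type change
catches; PAGE_SHIFT, __pa() and phys_to_pfn() below are simplified
stand-ins, not the real s390 definitions:

#include <stdio.h>

#define PAGE_SHIFT 12
#define __pa(x)           ((unsigned long)(x))      /* stand-in */
#define phys_to_pfn(phys) ((phys) >> PAGE_SHIFT)

/* Old style: a macro accepts any argument type without complaint. */
#define virt_to_pfn_macro(kaddr) (phys_to_pfn(__pa(kaddr)))

/* New style: the pointer contract is explicit in the prototype. */
static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return phys_to_pfn(__pa(kaddr));
}

int main(void)
{
	unsigned long addr = 0x10000;
	char buf[64];

	printf("%lu\n", virt_to_pfn_macro(addr)); /* silent, by accident */
	printf("%lu\n", virt_to_pfn(buf));        /* fine: a pointer */
	/* virt_to_pfn(addr) would now draw a "makes pointer from
	 * integer without a cast" warning. */
	return 0;
}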
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
---
arch/s390/include/asm/kfence.h | 2 +-
arch/s390/include/asm/page.h | 12 ++++++++++--
arch/s390/mm/cmm.c | 2 +-
arch/s390/mm/vmem.c | 2 +-
drivers/s390/block/scm_blk.c | 2 +-
drivers/s390/char/vmcp.c | 2 +-
6 files changed, 15 insertions(+), 7 deletions(-)
---
base-commit: 06c2afb862f9da8dc5efa4b6076a0e48c3fbaaa5
change-id: 20230809-virt-to-phys-s390-2fa3d38b8855
Best regards,
Comments
On Fri, Aug 11, 2023 at 09:02:47AM +0200, Linus Walleij wrote:
> Making virt_to_pfn() a static inline taking a strongly typed
> (const void *) makes the contract of passing a pointer of that
> type to the function explicit and exposes any misuse of the
> macro virt_to_pfn() acting polymorphic and accepting many types
> such as (void *), (uintptr_t) or (unsigned long) as arguments
> without warnings.
>
> For symmetry do the same with pfn_to_virt() reflecting the
> current layout in asm-generic/page.h.
>
> Doing this reveals a number of offenders in the arch code and
> the S390-specific drivers, so just bite the bullet and fix up
> all of those as well.
Funnily enough, except for drivers/s390/char/vmcp.c none of the
affected code pieces below is an offender. But anyway, to me it
looks like a nice improvement.
> Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
> ---
> arch/s390/include/asm/kfence.h | 2 +-
> arch/s390/include/asm/page.h | 12 ++++++++++--
> arch/s390/mm/cmm.c | 2 +-
> arch/s390/mm/vmem.c | 2 +-
> drivers/s390/block/scm_blk.c | 2 +-
> drivers/s390/char/vmcp.c | 2 +-
> 6 files changed, 15 insertions(+), 7 deletions(-)
>
> diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h
> index d55ba878378b..e47fd8cbe701 100644
> --- a/arch/s390/include/asm/kfence.h
> +++ b/arch/s390/include/asm/kfence.h
> @@ -35,7 +35,7 @@ static __always_inline void kfence_split_mapping(void)
>
> static inline bool kfence_protect_page(unsigned long addr, bool protect)
> {
> - __kernel_map_pages(virt_to_page(addr), 1, !protect);
> + __kernel_map_pages(virt_to_page((void *)addr), 1, !protect);
> return true;
> }
>
> diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
> index a9c138fcd2ad..cfec0743314e 100644
> --- a/arch/s390/include/asm/page.h
> +++ b/arch/s390/include/asm/page.h
> @@ -191,8 +191,16 @@ int arch_make_page_accessible(struct page *page);
> #define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
> #define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
>
> -#define pfn_to_virt(pfn) __va(pfn_to_phys(pfn))
> -#define virt_to_pfn(kaddr) (phys_to_pfn(__pa(kaddr)))
> +static inline void *pfn_to_virt(unsigned long pfn)
> +{
> + return __va(pfn_to_phys(pfn));
> +}
> +
> +static inline unsigned long virt_to_pfn(const void *kaddr)
> +{
> + return phys_to_pfn(__pa(kaddr));
> +}
> +
> #define pfn_to_kaddr(pfn) pfn_to_virt(pfn)
>
> #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
> diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
> index 5300c6867d5e..f47515313226 100644
> --- a/arch/s390/mm/cmm.c
> +++ b/arch/s390/mm/cmm.c
> @@ -90,7 +90,7 @@ static long cmm_alloc_pages(long nr, long *counter,
> } else
> free_page((unsigned long) npa);
> }
> - diag10_range(virt_to_pfn(addr), 1);
> + diag10_range(virt_to_pfn((void *)addr), 1);
> pa->pages[pa->index++] = addr;
> (*counter)++;
> spin_unlock(&cmm_lock);
> diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
> index b26649233d12..30cd6e1be10d 100644
> --- a/arch/s390/mm/vmem.c
> +++ b/arch/s390/mm/vmem.c
> @@ -36,7 +36,7 @@ static void vmem_free_pages(unsigned long addr, int order)
> {
> /* We don't expect boot memory to be removed ever. */
> if (!slab_is_available() ||
> - WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
> + WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
> return;
> free_pages(addr, order);
> }
> diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
> index 0c1df1d5f1ac..3a9cc8a4a230 100644
> --- a/drivers/s390/block/scm_blk.c
> +++ b/drivers/s390/block/scm_blk.c
> @@ -134,7 +134,7 @@ static void scm_request_done(struct scm_request *scmrq)
>
> if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
> IS_ALIGNED(aidaw, PAGE_SIZE))
> - mempool_free(virt_to_page(aidaw), aidaw_pool);
> + mempool_free(virt_to_page((void *)aidaw), aidaw_pool);
> }
>
> spin_lock_irqsave(&list_lock, flags);
> diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
> index 4cebfaaa22b4..f66906da83c4 100644
> --- a/drivers/s390/char/vmcp.c
> +++ b/drivers/s390/char/vmcp.c
> @@ -89,7 +89,7 @@ static void vmcp_response_free(struct vmcp_session *session)
> order = get_order(session->bufsize);
> nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
> if (session->cma_alloc) {
> - page = virt_to_page((unsigned long)session->response);
> + page = virt_to_page((void *)session->response);
The cast to (void *) is superfluous, if I read your commit message
correctly: "...makes the contract of passing a pointer of that
type to the function explicit..."
> cma_release(vmcp_cma, page, nr_pages);
> session->cma_alloc = 0;
> } else {
>
> ---
> base-commit: 06c2afb862f9da8dc5efa4b6076a0e48c3fbaaa5
> change-id: 20230809-virt-to-phys-s390-2fa3d38b8855
Thanks, Linus!
> Best regards,
> --
> Linus Walleij <linus.walleij@linaro.org>
>
On Fri, Aug 11, 2023 at 3:44 PM Alexander Gordeev
<agordeev@linux.ibm.com> wrote:
> Funnily enough, except for drivers/s390/char/vmcp.c none of the
> affected code pieces below is an offender. But anyway, to me it
> looks like a nice improvement.
I'm puzzled: vmcp.c passes a char *, so it is actually not an
offender (I am trying to push a version without the cast to the
compile farm); the rest pass an unsigned long to the function
which now (after my change) takes a const void * argument?
Example:
> > @@ -90,7 +90,7 @@ static long cmm_alloc_pages(long nr, long *counter,
unsigned long addr;
> > + diag10_range(virt_to_pfn((void *)addr), 1);
Yours,
Linus Walleij
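To make the vmcp.c point concrete, a minimal sketch (the struct is
pared down to the one member under discussion, and virt_to_pfn() is
the simplified stand-in from the earlier example, not the real s390
definition):

struct vmcp_session {
	char *response;	/* a char *, not an unsigned long */
};

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return (unsigned long)kaddr >> 12;	/* stand-in */
}

static unsigned long sketch(struct vmcp_session *session)
{
	/* A char * converts to const void * implicitly, so no cast is
	 * needed; virt_to_page(session->response) works as-is too,
	 * since virt_to_page() feeds its argument to virt_to_pfn(). */
	return virt_to_pfn(session->response);
}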
On Fri, Aug 11, 2023 at 07:49:01PM +0200, Linus Walleij wrote:
> On Fri, Aug 11, 2023 at 3:44 PM Alexander Gordeev
> <agordeev@linux.ibm.com> wrote:
>
> > Funnily enough, except for drivers/s390/char/vmcp.c none of the
> > affected code pieces below is an offender. But anyway, to me it
> > looks like a nice improvement.
>
> I'm puzzled: vmcp.c passes a char *, so it is actually not an
> offender (I am trying to push a version without the cast to the
> compile farm); the rest pass an unsigned long to the function
> which now (after my change) takes a const void * argument?
>
> Example:
>
> > > @@ -90,7 +90,7 @@ static long cmm_alloc_pages(long nr, long *counter,
>
> unsigned long addr;
>
> > > + diag10_range(virt_to_pfn((void *)addr), 1);
I only tried to say that these pieces weren't offenders before
your patch and turned into ones after. But that seems to be what
your commit message says anyway.
> Yours,
> Linus Walleij
Thanks!
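To illustrate with the cmm.c example quoted above: addr is an
unsigned long, so the call only started warning once virt_to_pfn()
grew a typed parameter. A sketch, again with the simplified
stand-in rather than the real s390 definition:

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return (unsigned long)kaddr >> 12;	/* stand-in */
}

static unsigned long sketch(void)
{
	unsigned long addr = 0x10000;

	/* virt_to_pfn(addr) compiled silently against the old macro;
	 * against the typed inline it draws an integer-to-pointer
	 * warning, hence the explicit cast the patch adds: */
	return virt_to_pfn((void *)addr);
}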