[6.0,060/862] parisc: Fix userspace graphics card breakage due to pgtable special bit
Commit Message
Greg KH
Oct. 19, 2022, 8:22 a.m. UTC
From: Helge Deller <deller@gmx.de>

commit 70be49f2f6223ddd2fcddb0089a40864c37e1494 upstream.

Commit df24e1783e6e ("parisc: Add vDSO support") introduced vDSO support,
for which a _PAGE_SPECIAL page table flag was needed. Since we wanted to
keep every page table entry in 32 bits, that patch re-used the existing,
but as yet unused, _PAGE_DMB flag (which triggers a hardware break if a
page is accessed) to store the special bit.

But when graphics card memory is mmapped into userspace, the kernel uses
vm_iomap_memory(), which sets the special flag. So, with the DMB bit set,
every access to the graphics memory triggered a hardware exception and
segfaulted the userspace program.

Fix this breakage by dropping the DMB bit when writing the page
protection bits to the CPU TLB.

In addition, this patch adds a small optimization: if huge pages aren't
configured (which is at least the case for 32-bit kernels), the special
bit is stored in the hpage (HUGE PAGE) bit instead. That way we can skip
resetting the DMB bit altogether.

Fixes: df24e1783e6e ("parisc: Add vDSO support")
Cc: <stable@vger.kernel.org> # 5.18+
Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/parisc/include/asm/pgtable.h | 7 ++++++-
 arch/parisc/kernel/entry.S        | 8 ++++++++
 2 files changed, 14 insertions(+), 1 deletion(-)
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -192,6 +192,11 @@ extern void __update_cache(pte_t pte);
 #define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
 #define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
 #define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */
+#ifdef CONFIG_HUGETLB_PAGE
+#define _PAGE_SPECIAL_BIT  _PAGE_DMB_BIT  /* DMB feature is currently unused */
+#else
+#define _PAGE_SPECIAL_BIT  _PAGE_HPAGE_BIT /* use unused HUGE PAGE bit */
+#endif
 
 /* N.B. The bits are defined in terms of a 32 bit word above, so the */
 /*      following macro is ok for both 32 and 64 bit.                */
@@ -219,7 +224,7 @@ extern void __update_cache(pte_t pte);
 #define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
 #define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
 #define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
-#define _PAGE_SPECIAL  (_PAGE_DMB)
+#define _PAGE_SPECIAL  (1 << xlate_pabit(_PAGE_SPECIAL_BIT))
 
 #define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -499,6 +499,10 @@
 	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
 	 * trigger an access rights trap in user space if the user
 	 * tries to read an unreadable page */
+#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
+	/* need to drop DMB bit, as it's used as SPECIAL flag */
+	depi		0,_PAGE_SPECIAL_BIT,1,\pte
+#endif
 	depd		\pte,8,7,\prot
 
 	/* PAGE_USER indicates the page can be read with user privileges,
@@ -529,6 +533,10 @@
 	 * makes the tlb entry for the differently formatted pa11
 	 * insertion instructions */
 	.macro		make_insert_tlb_11	spc,pte,prot
+#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
+	/* need to drop DMB bit, as it's used as SPECIAL flag */
+	depi		0,_PAGE_SPECIAL_BIT,1,\pte
+#endif
 	zdep		\spc,30,15,\prot
 	dep		\pte,8,7,\prot
 	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
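For anyone reviewing the bit manipulation: the short user-space C sketch below
mirrors what the new _PAGE_SPECIAL_BIT selection and the added depi instructions
amount to, in the 32-bit word view that pgtable.h uses. It is a sketch only:
the xlate_pabit() body and the _PAGE_DMB_BIT value here are illustrative
assumptions (the real definitions live in arch/parisc/include/asm/pgtable.h);
the rest follows the hunks above.

/*
 * Minimal sketch of the special-bit handling in this patch (not kernel code).
 * Bit numbers are named in terms of a 32-bit word and converted to a shift
 * count with xlate_pabit(), as in pgtable.h.  The xlate_pabit() body and the
 * _PAGE_DMB_BIT value are illustrative assumptions.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define xlate_pabit(x)     (31 - (x))         /* PA bit number -> shift count */

#define _PAGE_HPAGE_BIT    21                 /* (0x400) from the hunk above */
#define _PAGE_DMB_BIT      25                 /* placeholder value, for illustration */

#ifdef CONFIG_HUGETLB_PAGE
#define _PAGE_SPECIAL_BIT  _PAGE_DMB_BIT      /* DMB feature is currently unused */
#else
#define _PAGE_SPECIAL_BIT  _PAGE_HPAGE_BIT    /* reuse the software-only HUGE PAGE bit */
#endif

#define _PAGE_SPECIAL      (1u << xlate_pabit(_PAGE_SPECIAL_BIT))

/*
 * Rough C counterpart of the added "depi 0,_PAGE_SPECIAL_BIT,1,\pte":
 * clear the software-only special bit so the hardware TLB never sees a
 * PTE with the DMB (data memory break) bit set.
 */
static uint32_t strip_special(uint32_t pte)
{
	return pte & ~_PAGE_SPECIAL;
}

int main(void)
{
	uint32_t pte = _PAGE_SPECIAL | 0x1;	/* fake PTE with the special bit set */

	printf("pte before: 0x%08" PRIx32 ", after: 0x%08" PRIx32 "\n",
	       pte, strip_special(pte));
	return 0;
}

Compiled as-is the sketch takes the #else branch; building with
-DCONFIG_HUGETLB_PAGE exercises the DMB-alias path instead. It also shows why
the new depi instructions are guarded by the #if in entry.S: when
_PAGE_SPECIAL_BIT aliases the software-only HPAGE bit, nothing needs to be
cleared before the TLB insert.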