[v3,2/2] dma-mapping: fix dma_addressing_limited() if dma_range_map can't cover all system RAM
Commit Message
There is an unusual case where the dma_range_map covers right up to the top
of system RAM but leaves a hole somewhere lower down. RAM that falls into
that hole cannot be translated in the phys_to_dma() checking path, which
prevents the NVMe device from DMA-mapping its buffers and causes hangs at boot.
E.g. on an Armv8 Ampere server, the DSDT ACPI table is:
Method (_DMA, 0, Serialized) // _DMA: Direct Memory Access
{
    Name (RBUF, ResourceTemplate ()
    {
        QWordMemory (ResourceConsumer, PosDecode, MinFixed,
            MaxFixed, Cacheable, ReadWrite,
            0x0000000000000000, // Granularity
            0x0000000000000000, // Range Minimum
            0x00000000FFFFFFFF, // Range Maximum
            0x0000000000000000, // Translation Offset
            0x0000000100000000, // Length
            ,, , AddressRangeMemory, TypeStatic)
        QWordMemory (ResourceConsumer, PosDecode, MinFixed,
            MaxFixed, Cacheable, ReadWrite,
            0x0000000000000000, // Granularity
            0x0000006010200000, // Range Minimum
            0x000000602FFFFFFF, // Range Maximum
            0x0000000000000000, // Translation Offset
            0x000000001FE00000, // Length
            ,, , AddressRangeMemory, TypeStatic)
        QWordMemory (ResourceConsumer, PosDecode, MinFixed,
            MaxFixed, Cacheable, ReadWrite,
            0x0000000000000000, // Granularity
            0x00000060F0000000, // Range Minimum
            0x00000060FFFFFFFF, // Range Maximum
            0x0000000000000000, // Translation Offset
            0x0000000010000000, // Length
            ,, , AddressRangeMemory, TypeStatic)
        QWordMemory (ResourceConsumer, PosDecode, MinFixed,
            MaxFixed, Cacheable, ReadWrite,
            0x0000000000000000, // Granularity
            0x0000007000000000, // Range Minimum
            0x000003FFFFFFFFFF, // Range Maximum
            0x0000000000000000, // Translation Offset
            0x0000039000000000, // Length
            ,, , AddressRangeMemory, TypeStatic)
    })
But the System RAM ranges are:
cat /proc/iomem | grep -i ram
90000000-91ffffff : System RAM
92900000-fffbffff : System RAM
880000000-fffffffff : System RAM
8800000000-bff5990fff : System RAM
bff59d0000-bff5a4ffff : System RAM
bff8000000-bfffffffff : System RAM
So some System RAM ranges (e.g. 880000000-fffffffff) fall outside the dma_range_map.
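For context, dma-direct resolves a CPU address by walking dev->dma_range_map
roughly as in the sketch below (paraphrased from the mainline
translate_phys_to_dma() helper, not quoted verbatim); a physical address that
falls into a hole gets DMA_MAPPING_ERROR back, so dma_capable() rejects it and
the mapping fails:

	/* paraphrased sketch of the dma-direct CPU-to-DMA address walk */
	for (m = dev->dma_range_map; m->size; m++) {
		u64 offset = paddr - m->cpu_start;

		if (paddr >= m->cpu_start && offset < m->size)
			return m->dma_start + offset;
	}
	/* no entry covers paddr: dma_capable() will reject the address */
	return DMA_MAPPING_ERROR;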
Fix it by checking whether each of the System RAM resources is fully
covered by the dma_range_map.
Signed-off-by: Jia He <justin.he@arm.com>
---
kernel/dma/mapping.c | 49 ++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 47 insertions(+), 2 deletions(-)
Comments
> + */
> +static int check_ram_in_range_map(unsigned long start_pfn,
> +				  unsigned long nr_pages, void *data)
> +{
> +	unsigned long end_pfn = start_pfn + nr_pages;
> +	struct device *dev = (struct device *)data;
No need for the cast here.
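I.e., since data is already a void pointer, a plain assignment would do:

	struct device *dev = data;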
> +	struct bus_dma_region *bdr = NULL;
> +	const struct bus_dma_region *m;
> +
> +	while (start_pfn < end_pfn) {
> +		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
> +			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
> +
> +			if (start_pfn >= cpu_start_pfn
> +			    && start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
Linux coding style keeps the && on the previous line.
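I.e. the condition would then read:

	if (start_pfn >= cpu_start_pfn &&
	    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {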
> +				bdr = (struct bus_dma_region *)m;
If you also declared bdr as const, you could do away with the cast entirely.
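For illustration, with

	const struct bus_dma_region *bdr = NULL;

the assignment can then simply be 'bdr = m;' with no cast.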
> bool dma_addressing_limited(struct device *dev)
> {
> -	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
> -			dma_get_required_mask(dev);
> +	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
> +			dma_get_required_mask(dev))
> +		return true;
> +
> +	return !all_ram_in_dma_range_map(dev);
So, all the dma range map handling is really a dma-direct concept. I think
here in dma_addressing_limited() we should just do:

	if (likely(!ops))
		return !dma_direct_all_ram_mapped(dev);

	return false;

with dma_direct_all_ram_mapped() moved to dma-direct.c.
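Put together, that suggestion would make the function look roughly like the
sketch below (assuming the helper is renamed dma_direct_all_ram_mapped() and
moved to kernel/dma/direct.c, and ops comes from get_dma_ops()):

	bool dma_addressing_limited(struct device *dev)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
		    dma_get_required_mask(dev))
			return true;

		/* dma_range_map coverage only matters for dma-direct */
		if (unlikely(ops))
			return false;
		return !dma_direct_all_ram_mapped(dev);
	}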
@@ -7,6 +7,7 @@
*/
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
+#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -793,6 +794,47 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_set_coherent_mask);
+/*
+ * To check whether all RAM resource ranges are covered by the dma range map.
+ * Returns 0 when the check should continue with the next RAM range.
+ * Returns 1 if some RAM range can't be covered by the dma_range_map.
+ */
+static int check_ram_in_range_map(unsigned long start_pfn,
+				  unsigned long nr_pages, void *data)
+{
+	unsigned long end_pfn = start_pfn + nr_pages;
+	struct device *dev = (struct device *)data;
+	struct bus_dma_region *bdr = NULL;
+	const struct bus_dma_region *m;
+
+	while (start_pfn < end_pfn) {
+		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
+			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
+
+			if (start_pfn >= cpu_start_pfn
+			    && start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
+				bdr = (struct bus_dma_region *)m;
+				break;
+			}
+		}
+		if (!bdr)
+			return 1;
+
+		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
+	}
+
+	return 0;
+}
+
+static bool all_ram_in_dma_range_map(struct device *dev)
+{
+	if (!dev->dma_range_map)
+		return true;
+
+	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
+				      check_ram_in_range_map);
+}
+
/**
* dma_addressing_limited - return if the device is addressing limited
* @dev: device to check
@@ -803,8 +845,11 @@ EXPORT_SYMBOL(dma_set_coherent_mask);
*/
bool dma_addressing_limited(struct device *dev)
{
-	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
-			dma_get_required_mask(dev);
+	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
+			dma_get_required_mask(dev))
+		return true;
+
+	return !all_ram_in_dma_range_map(dev);
}
EXPORT_SYMBOL(dma_addressing_limited);