@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
struct device;
struct page;
@@ -104,12 +105,16 @@ struct io_tlb_pool {
/**
* struct io_tlb_mem - Software IO TLB allocator
* @defpool: Default (initial) IO TLB memory pool descriptor.
+ * @pool: IO TLB memory pool descriptor (if not dynamic).
* @nslabs: Total number of IO TLB slabs in all pools.
* @debugfs: The dentry to debugfs.
* @force_bounce: %true if swiotlb bouncing is forced
* @for_alloc: %true if the pool is used for memory allocation
* @can_grow: %true if more pools can be allocated dynamically.
* @phys_limit: Maximum allowed physical address.
+ * @lock: Lock to synchronize changes to the @pools list.
+ * @pools: List of IO TLB memory pool descriptors (if dynamic).
+ * @dyn_alloc: Dynamic IO TLB pool allocation work.
* @total_used: The total number of slots in the pool that are currently used
* across all areas. Used only for calculating used_hiwater in
* debugfs.
@@ -125,6 +130,9 @@ struct io_tlb_mem {
#ifdef CONFIG_SWIOTLB_DYNAMIC
bool can_grow;
u64 phys_limit;
+ spinlock_t lock;
+ struct list_head pools;
+ struct work_struct dyn_alloc;
#endif
#ifdef CONFIG_DEBUG_FS
atomic_long_t total_used;
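The new @pools list follows the usual RCU publish/read contract: writers serialize on @lock and use the _rcu list helpers, while readers walk the list locklessly inside an RCU read-side critical section. A minimal reader-side sketch, using a hypothetical helper name that is not part of this patch:

#include <linux/rculist.h>

/* Hypothetical helper: count the pools currently published on @pools. */
static unsigned int swiotlb_pool_count(struct io_tlb_mem *mem)
{
	struct io_tlb_pool *pool;
	unsigned int n = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &mem->pools, node)
		n++;
	rcu_read_unlock();

	return n;
}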
@@ -79,8 +79,23 @@ struct io_tlb_slot {
static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+
+static void swiotlb_dyn_alloc(struct work_struct *work);
+
+static struct io_tlb_mem io_tlb_default_mem = {
+ .lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
+ .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
+ .dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
+ swiotlb_dyn_alloc),
+};
+
+#else /* !CONFIG_SWIOTLB_DYNAMIC */
+
static struct io_tlb_mem io_tlb_default_mem;
+#endif /* CONFIG_SWIOTLB_DYNAMIC */
+
static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
static unsigned long default_nareas;
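The compile-time initializer above has a straightforward run-time counterpart; an io_tlb_mem instance obtained with kzalloc() would need the equivalent of this sketch (hypothetical helper name, assuming CONFIG_SWIOTLB_DYNAMIC):

/* Hypothetical run-time equivalent of the static initializer above. */
static void swiotlb_mem_init_dynamic(struct io_tlb_mem *mem)
{
	spin_lock_init(&mem->lock);
	INIT_LIST_HEAD_RCU(&mem->pools);
	INIT_WORK(&mem->dyn_alloc, swiotlb_dyn_alloc);
}

The restricted-DMA path at the end of this patch (rmem_swiotlb_device_init()) needs exactly this kind of initialization for @lock and @pools before add_mem_pool() touches the list.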
@@ -278,6 +293,23 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
return;
}
+/**
+ * add_mem_pool() - add a memory pool to the allocator
+ * @mem: Software IO TLB allocator.
+ * @pool: Memory pool to be added.
+ */
+static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
+{
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ spin_lock(&mem->lock);
+ list_add_rcu(&pool->node, &mem->pools);
+ mem->nslabs += pool->nslabs;
+ spin_unlock(&mem->lock);
+#else
+ mem->nslabs = pool->nslabs;
+#endif
+}
+
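Nothing in this patch ever removes a pool from mem->pools, but the discipline established by add_mem_pool() implies what removal would have to look like; purely as an illustration, a hypothetical counterpart would pair list_del_rcu() with the swiotlb_dyn_free() RCU callback:

/* Hypothetical removal counterpart, shown only to illustrate the locking
 * and RCU rules; no caller in this patch removes pools from mem->pools.
 */
static void del_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
	spin_lock(&mem->lock);
	list_del_rcu(&pool->node);
	mem->nslabs -= pool->nslabs;
	spin_unlock(&mem->lock);

	/* Readers may still be walking the list; free after a grace period. */
	call_rcu(&pool->rcu, swiotlb_dyn_free);
#endif
}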
static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
unsigned int flags,
int (*remap)(void *tlb, unsigned long nslabs))
@@ -375,7 +407,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false,
default_nareas);
- io_tlb_default_mem.nslabs = nslabs;
+ add_mem_pool(&io_tlb_default_mem, mem);
if (flags & SWIOTLB_VERBOSE)
swiotlb_print_info();
@@ -474,7 +506,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
(nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
nareas);
- io_tlb_default_mem.nslabs = nslabs;
+ add_mem_pool(&io_tlb_default_mem, mem);
swiotlb_print_info();
return 0;
@@ -625,44 +657,83 @@ static void swiotlb_free_tlb(void *vaddr, size_t bytes)
/**
* swiotlb_alloc_pool() - allocate a new IO TLB memory pool
* @dev: Device for which a memory pool is allocated.
- * @nslabs: Desired number of slabs.
+ * @minslabs: Minimum number of slabs.
+ * @nslabs: Desired (maximum) number of slabs.
+ * @nareas: Number of areas.
* @phys_limit: Maximum DMA buffer physical address.
* @gfp: GFP flags for the allocations.
*
- * Allocate and initialize a new IO TLB memory pool.
+ * Allocate and initialize a new IO TLB memory pool. The actual number of
+ * slabs may be reduced if allocation of @nslabs fails. If even
+ * @minslabs cannot be allocated, this function fails.
*
* Return: New memory pool, or %NULL on allocation failure.
*/
static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
- unsigned int nslabs, u64 phys_limit, gfp_t gfp)
+ unsigned long minslabs, unsigned long nslabs,
+ unsigned int nareas, u64 phys_limit, gfp_t gfp)
{
struct io_tlb_pool *pool;
+ unsigned int slot_order;
struct page *tlb;
size_t pool_size;
size_t tlb_size;
- pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), 1) +
- array_size(sizeof(*pool->slots), nslabs);
+ pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
pool = kzalloc(pool_size, gfp);
if (!pool)
goto error;
pool->areas = (void *)pool + sizeof(*pool);
- pool->slots = (void *)pool->areas + sizeof(*pool->areas);
tlb_size = nslabs << IO_TLB_SHIFT;
- tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp);
- if (!tlb)
- goto error_tlb;
+ while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
+ if (nslabs <= minslabs)
+ goto error_tlb;
+ nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+ nareas = limit_nareas(nareas, nslabs);
+ tlb_size = nslabs << IO_TLB_SHIFT;
+ }
- swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, 1);
+ slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
+ pool->slots = (struct io_tlb_slot *)
+ __get_free_pages(gfp, slot_order);
+ if (!pool->slots)
+ goto error_slots;
+
+ swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
return pool;
+error_slots:
+ swiotlb_free_tlb(page_address(tlb), tlb_size);
error_tlb:
kfree(pool);
error:
return NULL;
}
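To make the back-off concrete: with the usual 2 KiB slot size (IO_TLB_SHIFT == 11), a 64 MiB request (32768 slabs) that cannot be allocated is retried at 32 MiB (16384 slabs), then 16 MiB, and so on until @minslabs is reached; the ALIGN(..., IO_TLB_SEGSIZE) only matters once the halved count stops being a multiple of the 128-slab segment size. The two call sites added later in this patch use the two extremes of the interface:

/* Background worker: may sleep, accepts anything down to IO_TLB_MIN_SLABS. */
pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
			  default_nareas, mem->phys_limit, GFP_KERNEL);

/* Atomic mapping path: minslabs == nslabs, so no shrinking is allowed. */
pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
			  GFP_NOWAIT | __GFP_NOWARN);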
+/**
+ * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
+ * @work: Pointer to dyn_alloc in struct io_tlb_mem.
+ */
+static void swiotlb_dyn_alloc(struct work_struct *work)
+{
+ struct io_tlb_mem *mem =
+ container_of(work, struct io_tlb_mem, dyn_alloc);
+ struct io_tlb_pool *pool;
+
+ pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
+ default_nareas, mem->phys_limit, GFP_KERNEL);
+ if (!pool) {
+ pr_warn_ratelimited("Failed to allocate new pool\n");
+ return;
+ }
+
+ add_mem_pool(mem, pool);
+
+ /* Pairs with smp_rmb() in swiotlb_find_pool(). */
+ smp_wmb();
+}
+
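The smp_wmb() orders the list insertion in add_mem_pool() before any later store that makes a bounce address from the new pool visible to another CPU, so a reader that has seen such an address is guaranteed to also see the pool on mem->pools. A rough message-passing sketch of the pairing (illustrative only, not part of the patch):

/*
 * CPU 0 (swiotlb_dyn_alloc)            CPU 1 (unmap / sync path)
 * -------------------------            -------------------------
 * add_mem_pool(mem, pool);             paddr = <bounce addr in new pool>;
 * smp_wmb();                           smp_rmb();
 * ... a later mapping hands out        swiotlb_find_pool(dev, paddr)
 * a bounce address from this pool          must observe the new pool
 */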
/**
* swiotlb_dyn_free() - RCU callback to free a memory pool
* @rcu: RCU head in the corresponding struct io_tlb_pool.
@@ -670,8 +741,10 @@ static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
static void swiotlb_dyn_free(struct rcu_head *rcu)
{
struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
+ size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
size_t tlb_size = pool->end - pool->start;
+ free_pages((unsigned long)pool->slots, get_order(slots_size));
swiotlb_free_tlb(pool->vaddr, tlb_size);
kfree(pool);
}
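The slots array is released with free_pages() because swiotlb_alloc_pool() now obtains it from __get_free_pages() instead of folding it into the kzalloc()ed descriptor; for a full-size pool the array is too large to sit comfortably in a kmalloc allocation. A rough worked example, assuming a 24-byte struct io_tlb_slot (the real size depends on configuration and architecture):

/*
 * Assumed example: 32768 slabs (the 64 MiB default) x 24 bytes per slot
 * is 786432 bytes, i.e. 192 pages of 4 KiB, so get_order() returns 8 and
 * both sides allocate and free a 1 MiB (order-8) block:
 *
 *   pool->slots = (struct io_tlb_slot *)__get_free_pages(gfp, 8);
 *   ...
 *   free_pages((unsigned long)pool->slots, 8);
 */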
@@ -689,15 +762,19 @@ static void swiotlb_dyn_free(struct rcu_head *rcu)
struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
- struct io_tlb_pool *pool = &mem->defpool;
-
- if (paddr >= pool->start && paddr < pool->end)
- return pool;
+ struct io_tlb_pool *pool;
- /* Pairs with smp_wmb() in swiotlb_find_slots(). */
+ /* Pairs with smp_wmb() in swiotlb_find_slots() and
+ * swiotlb_dyn_alloc(), which modify the RCU lists.
+ */
smp_rmb();
rcu_read_lock();
+ list_for_each_entry_rcu(pool, &mem->pools, node) {
+ if (paddr >= pool->start && paddr < pool->end)
+ goto out;
+ }
+
list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
if (paddr >= pool->start && paddr < pool->end)
goto out;
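Callers of swiotlb_find_pool() generally only care whether any pool owns the address; the ownership test used on the unmap and sync paths reduces to something like this simplified sketch (not the literal helper in swiotlb.h):

/* Simplified sketch: does a swiotlb pool own this physical address? */
static bool paddr_is_swiotlb_bounce(struct device *dev, phys_addr_t paddr)
{
	return dev->dma_io_tlb_mem && swiotlb_find_pool(dev, paddr);
}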
@@ -1046,18 +1123,24 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
u64 phys_limit;
int index;
- pool = &mem->defpool;
- index = swiotlb_pool_find_slots(dev, pool, orig_addr,
- alloc_size, alloc_align_mask);
- if (index >= 0)
- goto found;
-
+ rcu_read_lock();
+ list_for_each_entry_rcu(pool, &mem->pools, node) {
+ index = swiotlb_pool_find_slots(dev, pool, orig_addr,
+ alloc_size, alloc_align_mask);
+ if (index >= 0) {
+ rcu_read_unlock();
+ goto found;
+ }
+ }
+ rcu_read_unlock();
if (!mem->can_grow)
return -1;
+ schedule_work(&mem->dyn_alloc);
+
nslabs = nr_slots(alloc_size);
phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
- pool = swiotlb_alloc_pool(dev, nslabs, phys_limit,
+ pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
GFP_NOWAIT | __GFP_NOWARN);
if (!pool)
return -1;
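Note the split this creates: the worker grows the global allocator in the background with GFP_KERNEL and the allocator-wide phys_limit, while the mapping path, which may run in atomic context, immediately falls back to a transient per-device pool using GFP_NOWAIT | __GFP_NOWARN and a limit derived from the device. A concrete illustration of that limit, with assumed values:

/*
 * Assumed example: a device with a 32-bit DMA mask and no bus limit.
 *
 *   *dev->dma_mask     == DMA_BIT_MASK(32)    (0xffffffff)
 *   dev->bus_dma_limit == 0                   (no extra restriction)
 *
 * min_not_zero() skips the zero bus limit, so phys_limit is 0xffffffff
 * and the transient pool must be allocated below 4 GiB.
 */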
@@ -1141,7 +1224,19 @@ static unsigned long mem_pool_used(struct io_tlb_pool *pool)
*/
static unsigned long mem_used(struct io_tlb_mem *mem)
{
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ struct io_tlb_pool *pool;
+ unsigned long used = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pool, &mem->pools, node)
+ used += mem_pool_used(pool);
+ rcu_read_unlock();
+
+ return used;
+#else
return mem_pool_used(&mem->defpool);
+#endif
}
#endif /* CONFIG_DEBUG_FS */
@@ -1563,7 +1658,10 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
false, nareas);
mem->force_bounce = true;
mem->for_alloc = true;
- mem->nslabs = nslabs;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ spin_lock_init(&mem->lock);
+ INIT_LIST_HEAD_RCU(&mem->pools);
+#endif
+ add_mem_pool(mem, pool);
rmem->priv = mem;