[11/11] dmapool: link blocks across pages

Message ID 20221205145937.54367-12-kbusch@meta.com
State New
Headers
Series dmapool enhancements |

Commit Message

Keith Busch Dec. 5, 2022, 2:59 p.m. UTC
  From: Keith Busch <kbusch@kernel.org>

The allocated dmapool pages are never freed for the lifetime of the
pool. There is no need for the two level list+stack lookup for finding a
free block since nothing is ever removed from the list. Just use a
simple stack, reducing time complexity to constant.

The implementation inserts the stack linking elements and the dma handle
of the block within itself when freed. This means the smallest possible
dmapool block is increased to at most 16 bytes to accommodate these
fields, but there are no existing users requesting a dma pool smaller
than that anyway.

Removing the list yields a significant performance improvement. Using the
kernel's micro-benchmarking self test:

Before:

  # modprobe dmapool_test
  dmapool test: size:16   blocks:8192   time:57282
  dmapool test: size:64   blocks:8192   time:172562
  dmapool test: size:256  blocks:8192   time:789247
  dmapool test: size:1024 blocks:2048   time:371823
  dmapool test: size:4096 blocks:1024   time:362237

After:

  # modprobe dmapool_test
  dmapool test: size:16   blocks:8192   time:24997
  dmapool test: size:64   blocks:8192   time:26584
  dmapool test: size:256  blocks:8192   time:33542
  dmapool test: size:1024 blocks:2048   time:9022
  dmapool test: size:4096 blocks:1024   time:6045

The module test allocates quite a few blocks that may not accurately
represent how these pools are used in real life. For a more macro level
benchmark, running fio high-depth + high-batched on nvme, this patch
shows submission and completion latency reduced by ~100usec each, 1%
IOPS improvement, and perf record's time spent in dma_pool_alloc/free
was reduced by half.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 mm/dmapool.c | 213 ++++++++++++++++++++++++++-------------------------
 1 file changed, 108 insertions(+), 105 deletions(-)
  

Comments

Tony Battersby Dec. 5, 2022, 6:07 p.m. UTC | #1
On 12/5/22 09:59, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
>
> The allocated dmapool pages are never freed for the lifetime of the
> pool. There is no need for the two level list+stack lookup for finding a
> free block since nothing is ever removed from the list. Just use a
> simple stack, reducing time complexity to constant.
>
> The implementation inserts the stack linking elements and the dma handle
> of the block within itself when freed. This means the smallest possible
> dmapool block is increased to at most 16 bytes to accomodate these
> fields, but there are no exisiting users requesting a dma pool smaller
> than that anyway.

Great work!

I notice that the comment at the top of dmapool.c describes the old
design ("Free blocks are tracked in an unsorted singly-linked
list of free blocks within the page."), so you need to delete or update
that part of the comment.

>  struct dma_pool {		/* the pool */
>  	struct list_head page_list;
>  	spinlock_t lock;
>  	struct device *dev;
> +	struct dma_block *next_block;
>  	unsigned int size;
>  	unsigned int allocation;
>  	unsigned int boundary;
> +	unsigned int nr_blocks;
> +	unsigned int nr_active;
> +	unsigned int nr_pages;

I think nr_blocks, nr_active, and nr_pages should be size_t rather than
unsigned int since they count the number of objects in the entire pool,
and it would be theoretically possible to allocate more than 2^32 objects.


> @@ -199,22 +217,24 @@ EXPORT_SYMBOL(dma_pool_create);
>  
>  static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
>  {
> -	unsigned int offset = 0;
> -	unsigned int next_boundary = pool->boundary;
> -
> -	page->in_use = 0;
> -	page->offset = 0;
> -	do {
> -		unsigned int next = offset + pool->size;
> -		if (unlikely((next + pool->size) >= next_boundary)) {
> -			next = next_boundary;
> +	unsigned int next_boundary = pool->boundary, offset = 0;
> +	struct dma_block *block;
> +
> +	while (offset < pool->allocation) {
> +		if (offset > next_boundary) {

This is incorrect.  I believe the correct comparison should be:

+    while (offset + pool->size <= pool->allocation) {
+        if (offset + pool->size > next_boundary) {

That should handle all the weird possible combinations of size,
boundary, and allocation.

Tony Battersby
Cybernetics
  
kernel test robot Dec. 5, 2022, 10:34 p.m. UTC | #2
Hi Keith,

I love your patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v6.1-rc8]
[cannot apply to next-20221205]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Keith-Busch/dmapool-enhancements/20221205-232116
patch link:    https://lore.kernel.org/r/20221205145937.54367-12-kbusch%40meta.com
patch subject: [PATCH 11/11] dmapool: link blocks across pages
config: x86_64-randconfig-a006-20221205
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/3717500cb5479136121a65d22d48f4b5e940bba4
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Keith-Busch/dmapool-enhancements/20221205-232116
        git checkout 3717500cb5479136121a65d22d48f4b5e940bba4
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

>> mm/dmapool.c:306:6: error: incompatible pointer types initializing 'u8 *' (aka 'unsigned char *') with an expression of type 'struct dma_block *' [-Werror,-Wincompatible-pointer-types]
           u8 *data = block;
               ^      ~~~~~
   1 error generated.


vim +306 mm/dmapool.c

   301	
   302	static inline void pool_check_block(struct dma_pool *pool, struct dma_block *block,
   303					    gfp_t mem_flags)
   304	{
   305	#ifdef DMAPOOL_DEBUG
 > 306		u8 *data = block;
   307		int i;
   308	
   309		for (i = sizeof(struct dma_block); i < pool->size; i++) {
   310			if (data[i] == POOL_POISON_FREED)
   311				continue;
   312			dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
   313				pool->name, block);
   314	
   315			/*
   316			 * Dump the first 4 bytes even if they are not
   317			 * POOL_POISON_FREED
   318			 */
   319			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
   320					data, pool->size, 1);
   321			break;
   322		}
   323	
   324		if (!want_init_on_alloc(mem_flags))
   325			memset(block, POOL_POISON_ALLOCATED, pool->size);
   326	#endif
   327	}
   328
  
kernel test robot Dec. 11, 2022, 3:24 p.m. UTC | #3
Greeting,

FYI, we noticed BUG:unable_to_handle_page_fault_for_address due to commit (built with gcc-11):

commit: 3717500cb5479136121a65d22d48f4b5e940bba4 ("[PATCH 11/11] dmapool: link blocks across pages")
url: https://github.com/intel-lab-lkp/linux/commits/Keith-Busch/dmapool-enhancements/20221205-232116
base: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git 76dcd734eca23168cb008912c0f69ff408905235
patch link: https://lore.kernel.org/all/20221205145937.54367-12-kbusch@meta.com/
patch subject: [PATCH 11/11] dmapool: link blocks across pages

in testcase: nvml
version: nvml-x86_64-ec9fc0404-1_20221026
with following parameters:

	test: pmem
	group: libpmempool
	nr_pmem: 1
	fs: ext4
	mount_option: dax
	bp_memmap: 32G!4G



on test machine: 16 threads 1 sockets Intel(R) Xeon(R) CPU D-1541 @ 2.10GHz (Broadwell-DE) with 48G memory

caused below changes (please refer to attached dmesg/kmsg for entire log/backtrace):


If you fix the issue, kindly add following tag
| Reported-by: kernel test robot <oliver.sang@intel.com>
| Link: https://lore.kernel.org/oe-lkp/202212112301.ad0819f7-oliver.sang@intel.com


[   25.268833][    T1] BUG: unable to handle page fault for address: ffff88807bdb6000
[   25.269731][    T1] #PF: supervisor write access in kernel mode
[   25.269731][    T1] #PF: error_code(0x0002) - not-present page
[   25.269731][    T1] PGD c7fe01067 P4D c7fe01067 PUD c7fe04067 PMD c7fe07067 PTE 0
[   25.269731][    T1] Oops: 0002 [#1] SMP KASAN PTI
[   25.269731][    T1] CPU: 1 PID: 1 Comm: swapper/0 Not tainted 6.1.0-rc8-00011-g3717500cb547 #1
[   25.269731][    T1] Hardware name: Supermicro SYS-5018D-FN4T/X10SDV-8C-TLN4F, BIOS 1.1 03/02/2016
[ 25.269731][ T1] RIP: 0010:memset_erms (arch/x86/lib/memset_64.S:64) 
[ 25.269731][ T1] Code: c1 e9 03 40 0f b6 f6 48 b8 01 01 01 01 01 01 01 01 48 0f af c6 f3 48 ab 89 d1 f3 aa 4c 89 c8 c3 90 49 89 f9 40 88 f0 48 89 d1 <f3> aa 4c 89 c8 c3 90 49 89 fa 40 0f b6 ce 48 b8 01 01 01 01 01 01
All code
========
   0:	c1 e9 03             	shr    $0x3,%ecx
   3:	40 0f b6 f6          	movzbl %sil,%esi
   7:	48 b8 01 01 01 01 01 	movabs $0x101010101010101,%rax
   e:	01 01 01 
  11:	48 0f af c6          	imul   %rsi,%rax
  15:	f3 48 ab             	rep stos %rax,%es:(%rdi)
  18:	89 d1                	mov    %edx,%ecx
  1a:	f3 aa                	rep stos %al,%es:(%rdi)
  1c:	4c 89 c8             	mov    %r9,%rax
  1f:	c3                   	retq   
  20:	90                   	nop
  21:	49 89 f9             	mov    %rdi,%r9
  24:	40 88 f0             	mov    %sil,%al
  27:	48 89 d1             	mov    %rdx,%rcx
  2a:*	f3 aa                	rep stos %al,%es:(%rdi)		<-- trapping instruction
  2c:	4c 89 c8             	mov    %r9,%rax
  2f:	c3                   	retq   
  30:	90                   	nop
  31:	49 89 fa             	mov    %rdi,%r10
  34:	40 0f b6 ce          	movzbl %sil,%ecx
  38:	48                   	rex.W
  39:	b8 01 01 01 01       	mov    $0x1010101,%eax
  3e:	01 01                	add    %eax,(%rcx)

Code starting with the faulting instruction
===========================================
   0:	f3 aa                	rep stos %al,%es:(%rdi)
   2:	4c 89 c8             	mov    %r9,%rax
   5:	c3                   	retq   
   6:	90                   	nop
   7:	49 89 fa             	mov    %rdi,%r10
   a:	40 0f b6 ce          	movzbl %sil,%ecx
   e:	48                   	rex.W
   f:	b8 01 01 01 01       	mov    $0x1010101,%eax
  14:	01 01                	add    %eax,(%rcx)
[   25.269731][    T1] RSP: 0000:ffffc90000057630 EFLAGS: 00010202
[   25.269731][    T1] RAX: 0000000000000000 RBX: ffff888952c8e400 RCX: 0000000000000020
[   25.269731][    T1] RDX: 0000000000000060 RSI: 0000000000000000 RDI: ffff88807bdb6000
[   25.269731][    T1] RBP: 000000007bdb5fc0 R08: 0000000000000001 R09: ffff88807bdb5fc0
[   25.269731][    T1] R10: ffffed100f7b6c03 R11: 0000000000000001 R12: ffff88807bdb5fc0
[   25.269731][    T1] R13: ffff88807bdb5fc0 R14: 0000000000000100 R15: ffff888952c8e410
[   25.269731][    T1] FS:  0000000000000000(0000) GS:ffff888b9fa80000(0000) knlGS:0000000000000000
[   25.269731][    T1] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   25.269731][    T1] CR2: ffff88807bdb6000 CR3: 0000000c7e20e001 CR4: 00000000003706e0
[   25.269731][    T1] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[   25.269731][    T1] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[   25.269731][    T1] Call Trace:
[   25.269731][    T1]  <TASK>
[ 25.269731][ T1] dma_pool_alloc (mm/dmapool.c:370) 
[ 25.269731][ T1] ? kasan_set_track (mm/kasan/common.c:52) 
[ 25.269731][ T1] ehci_qh_alloc (drivers/usb/host/ehci-mem.c:75) 
[ 25.269731][ T1] ? ehci_qtd_alloc (drivers/usb/host/ehci-mem.c:68) 
[ 25.269731][ T1] ? dma_pool_create (mm/dmapool.c:215) 
[ 25.269731][ T1] ehci_mem_init+0x10a/0x780 
[ 25.269731][ T1] ? __hrtimer_init (kernel/time/hrtimer.c:1559) 
[ 25.269731][ T1] ehci_init (drivers/usb/host/ehci-hcd.c:504) 
[ 25.269731][ T1] ehci_setup (drivers/usb/early/ehci-dbgp.c:811 (discriminator 3)) 
[ 25.269731][ T1] ehci_pci_setup (drivers/usb/host/ehci-pci.c:261) 
[ 25.269731][ T1] ? ehci_pci_resume (drivers/usb/host/ehci-pci.c:91) 
[ 25.269731][ T1] ? mutex_unlock (arch/x86/include/asm/atomic64_64.h:190 include/linux/atomic/atomic-long.h:449 include/linux/atomic/atomic-instrumented.h:1790 kernel/locking/mutex.c:181 kernel/locking/mutex.c:540) 
[ 25.269731][ T1] ? __mutex_unlock_slowpath+0x2a0/0x2a0 
[ 25.269731][ T1] ? usb_alloc_dev (drivers/usb/core/usb.c:670) 
[ 25.269731][ T1] usb_add_hcd.cold (drivers/usb/core/hcd.c:2939) 
[ 25.269731][ T1] usb_hcd_pci_probe (drivers/usb/core/hcd-pci.c:259) 
[ 25.269731][ T1] ? ehci_pci_remove (drivers/usb/host/ehci-pci.c:382) 
[ 25.269731][ T1] local_pci_probe (drivers/pci/pci-driver.c:324) 
[ 25.269731][ T1] pci_call_probe (drivers/pci/pci-driver.c:392) 
[ 25.269731][ T1] ? _raw_spin_lock (arch/x86/include/asm/atomic.h:202 include/linux/atomic/atomic-instrumented.h:543 include/asm-generic/qspinlock.h:111 include/linux/spinlock.h:186 include/linux/spinlock_api_smp.h:134 kernel/locking/spinlock.c:154) 
[ 25.269731][ T1] ? pci_pm_suspend_noirq (drivers/pci/pci-driver.c:352) 
[ 25.269731][ T1] ? pci_assign_irq (drivers/pci/setup-irq.c:25) 
[ 25.269731][ T1] ? pci_match_device (drivers/pci/pci-driver.c:108 drivers/pci/pci-driver.c:159) 
[ 25.269731][ T1] ? kernfs_put (arch/x86/include/asm/atomic.h:123 (discriminator 1) include/linux/atomic/atomic-instrumented.h:576 (discriminator 1) fs/kernfs/dir.c:536 (discriminator 1)) 
[ 25.269731][ T1] pci_device_probe (drivers/pci/pci-driver.c:461) 
[ 25.269731][ T1] really_probe (drivers/base/dd.c:560 drivers/base/dd.c:639) 
[ 25.269731][ T1] __driver_probe_device (drivers/base/dd.c:719 drivers/base/dd.c:776) 
[ 25.269731][ T1] driver_probe_device (drivers/base/dd.c:808) 
[ 25.269731][ T1] __driver_attach (drivers/base/dd.c:1191) 
[ 25.269731][ T1] ? __device_attach_driver (drivers/base/dd.c:1135) 
[ 25.269731][ T1] bus_for_each_dev (drivers/base/bus.c:301) 
[ 25.269731][ T1] ? subsys_dev_iter_exit (drivers/base/bus.c:290) 
[ 25.269731][ T1] ? __kmem_cache_alloc_node (mm/slub.c:3400 mm/slub.c:3437) 
[ 25.269731][ T1] ? klist_add_tail (include/linux/list.h:69 include/linux/list.h:102 lib/klist.c:104 lib/klist.c:137) 
[ 25.269731][ T1] bus_add_driver (drivers/base/bus.c:618) 
[ 25.269731][ T1] driver_register (drivers/base/driver.c:246) 
[ 25.269731][ T1] ? ehci_hcd_init (drivers/usb/host/ehci-pci.c:422) 
[ 25.269731][ T1] do_one_initcall (init/main.c:1303) 
[ 25.269731][ T1] ? trace_event_raw_event_initcall_level (init/main.c:1294) 
[ 25.269731][ T1] ? parse_one (kernel/params.c:170) 
[ 25.269731][ T1] ? do_initcalls (init/main.c:1386) 
[ 25.269731][ T1] ? kasan_set_track (mm/kasan/common.c:52) 
[ 25.269731][ T1] ? __kasan_kmalloc (mm/kasan/common.c:381) 
[ 25.269731][ T1] do_initcalls (init/main.c:1375 init/main.c:1392) 
[ 25.269731][ T1] kernel_init_freeable (init/main.c:1635) 
[ 25.269731][ T1] ? console_on_rootfs (init/main.c:1601) 
[ 25.269731][ T1] ? usleep_range_state (kernel/time/timer.c:1897) 
[ 25.269731][ T1] ? _raw_spin_lock_bh (kernel/locking/spinlock.c:169) 
[ 25.269731][ T1] ? rest_init (init/main.c:1511) 
[ 25.269731][ T1] ? rest_init (init/main.c:1511) 
[ 25.269731][ T1] kernel_init (init/main.c:1521) 
[ 25.269731][ T1] ret_from_fork (arch/x86/entry/entry_64.S:312) 
[   25.269731][    T1]  </TASK>
[   25.269731][    T1] Modules linked in:
[   25.269731][    T1] CR2: ffff88807bdb6000
[   25.269731][    T1] ---[ end trace 0000000000000000 ]---
[ 25.269731][ T1] RIP: 0010:memset_erms (arch/x86/lib/memset_64.S:64) 
[ 25.269731][ T1] Code: c1 e9 03 40 0f b6 f6 48 b8 01 01 01 01 01 01 01 01 48 0f af c6 f3 48 ab 89 d1 f3 aa 4c 89 c8 c3 90 49 89 f9 40 88 f0 48 89 d1 <f3> aa 4c 89 c8 c3 90 49 89 fa 40 0f b6 ce 48 b8 01 01 01 01 01 01
All code
========
   0:	c1 e9 03             	shr    $0x3,%ecx
   3:	40 0f b6 f6          	movzbl %sil,%esi
   7:	48 b8 01 01 01 01 01 	movabs $0x101010101010101,%rax
   e:	01 01 01 
  11:	48 0f af c6          	imul   %rsi,%rax
  15:	f3 48 ab             	rep stos %rax,%es:(%rdi)
  18:	89 d1                	mov    %edx,%ecx
  1a:	f3 aa                	rep stos %al,%es:(%rdi)
  1c:	4c 89 c8             	mov    %r9,%rax
  1f:	c3                   	retq   
  20:	90                   	nop
  21:	49 89 f9             	mov    %rdi,%r9
  24:	40 88 f0             	mov    %sil,%al
  27:	48 89 d1             	mov    %rdx,%rcx
  2a:*	f3 aa                	rep stos %al,%es:(%rdi)		<-- trapping instruction
  2c:	4c 89 c8             	mov    %r9,%rax
  2f:	c3                   	retq   
  30:	90                   	nop
  31:	49 89 fa             	mov    %rdi,%r10
  34:	40 0f b6 ce          	movzbl %sil,%ecx
  38:	48                   	rex.W
  39:	b8 01 01 01 01       	mov    $0x1010101,%eax
  3e:	01 01                	add    %eax,(%rcx)

Code starting with the faulting instruction
===========================================
   0:	f3 aa                	rep stos %al,%es:(%rdi)
   2:	4c 89 c8             	mov    %r9,%rax
   5:	c3                   	retq   
   6:	90                   	nop
   7:	49 89 fa             	mov    %rdi,%r10
   a:	40 0f b6 ce          	movzbl %sil,%ecx
   e:	48                   	rex.W
   f:	b8 01 01 01 01       	mov    $0x1010101,%eax
  14:	01 01                	add    %eax,(%rcx)


To reproduce:

        git clone https://github.com/intel/lkp-tests.git
        cd lkp-tests
        sudo bin/lkp install job.yaml           # job file is attached in this email
        bin/lkp split-job --compatible job.yaml # generate the yaml file for lkp run
        sudo bin/lkp run generated-yaml-file

        # if come across any failure that blocks the test,
        # please remove ~/.lkp and /lkp dir to run from a clean state.
  

Patch

diff --git a/mm/dmapool.c b/mm/dmapool.c
index f5b79c3268856..ca6cc5d3d9e53 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -40,13 +40,22 @@ 
 #define DMAPOOL_DEBUG 1
 #endif
 
+struct dma_block {
+	struct dma_block *next_block;
+	dma_addr_t dma;
+};
+
 struct dma_pool {		/* the pool */
 	struct list_head page_list;
 	spinlock_t lock;
 	struct device *dev;
+	struct dma_block *next_block;
 	unsigned int size;
 	unsigned int allocation;
 	unsigned int boundary;
+	unsigned int nr_blocks;
+	unsigned int nr_active;
+	unsigned int nr_pages;
 	char name[32];
 	struct list_head pools;
 };
@@ -55,8 +64,6 @@  struct dma_page {		/* cacheable header for 'allocation' bytes */
 	struct list_head page_list;
 	void *vaddr;
 	dma_addr_t dma;
-	unsigned int in_use;
-	unsigned int offset;
 };
 
 static DEFINE_MUTEX(pools_lock);
@@ -64,30 +71,18 @@  static DEFINE_MUTEX(pools_reg_lock);
 
 static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	int size;
-	struct dma_page *page;
 	struct dma_pool *pool;
+	unsigned size;
 
 	size = sysfs_emit(buf, "poolinfo - 0.1\n");
 
 	mutex_lock(&pools_lock);
 	list_for_each_entry(pool, &dev->dma_pools, pools) {
-		unsigned pages = 0;
-		size_t blocks = 0;
-
-		spin_lock_irq(&pool->lock);
-		list_for_each_entry(page, &pool->page_list, page_list) {
-			pages++;
-			blocks += page->in_use;
-		}
-		spin_unlock_irq(&pool->lock);
-
 		/* per-pool info, no real statistics yet */
-		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n",
-				      pool->name, blocks,
-				      (size_t) pages *
-				      (pool->allocation / pool->size),
-				      pool->size, pages);
+		size += sysfs_emit_at(buf, size, "%-16s %4u %4u %4u %2u\n",
+				      pool->name, pool->nr_active,
+				      pool->nr_blocks, pool->size,
+				      pool->nr_pages);
 	}
 	mutex_unlock(&pools_lock);
 
@@ -96,6 +91,25 @@  static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha
 
 static DEVICE_ATTR_RO(pools);
 
+static inline struct dma_block *pool_block_pop(struct dma_pool *pool)
+{
+	struct dma_block *block = pool->next_block;
+
+	if (block) {
+		pool->next_block = block->next_block;
+		pool->nr_active++;
+	}
+	return block;
+}
+
+static inline void pool_block_push(struct dma_pool *pool, struct dma_block *block,
+				 dma_addr_t dma)
+{
+	block->dma = dma;
+	block->next_block = pool->next_block;
+	pool->next_block = block;
+}
+
 /**
  * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
  * @name: name of pool, for diagnostics
@@ -136,8 +150,8 @@  struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 
 	if (size == 0 || size > INT_MAX)
 		return NULL;
-	else if (size < 4)
-		size = 4;
+	if (size < sizeof(struct dma_block))
+		size = sizeof(struct dma_block);
 
 	size = ALIGN(size, align);
 	allocation = max_t(size_t, size, PAGE_SIZE);
@@ -162,6 +176,10 @@  struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	retval->size = size;
 	retval->boundary = boundary;
 	retval->allocation = allocation;
+	retval->nr_blocks = 0;
+	retval->nr_active = 0;
+	retval->nr_pages = 0;
+	retval->next_block = NULL;
 
 	INIT_LIST_HEAD(&retval->pools);
 
@@ -199,22 +217,24 @@  EXPORT_SYMBOL(dma_pool_create);
 
 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
 {
-	unsigned int offset = 0;
-	unsigned int next_boundary = pool->boundary;
-
-	page->in_use = 0;
-	page->offset = 0;
-	do {
-		unsigned int next = offset + pool->size;
-		if (unlikely((next + pool->size) >= next_boundary)) {
-			next = next_boundary;
+	unsigned int next_boundary = pool->boundary, offset = 0;
+	struct dma_block *block;
+
+	while (offset < pool->allocation) {
+		if (offset > next_boundary) {
+			offset = next_boundary;
 			next_boundary += pool->boundary;
+			continue;
 		}
-		*(int *)(page->vaddr + offset) = next;
-		offset = next;
-	} while (offset < pool->allocation);
+
+		block = page->vaddr + offset;
+		pool_block_push(pool, block, page->dma + offset);
+		offset += pool->size;
+		pool->nr_blocks++;
+	}
 
 	list_add(&page->page_list, &pool->page_list);
+	pool->nr_pages++;
 }
 
 static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
@@ -236,11 +256,6 @@  static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 	return page;
 }
 
-static inline bool is_page_busy(struct dma_page *page)
-{
-	return page->in_use != 0;
-}
-
 /**
  * dma_pool_destroy - destroys a pool of dma memory blocks.
  * @pool: dma pool that will be destroyed
@@ -252,7 +267,7 @@  static inline bool is_page_busy(struct dma_page *page)
 void dma_pool_destroy(struct dma_pool *pool)
 {
 	struct dma_page *page, *tmp;
-	bool empty = false;
+	bool empty = false, busy = false;
 
 	if (unlikely(!pool))
 		return;
@@ -267,13 +282,15 @@  void dma_pool_destroy(struct dma_pool *pool)
 		device_remove_file(pool->dev, &dev_attr_pools);
 	mutex_unlock(&pools_reg_lock);
 
+	if (pool->nr_active) {
+		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
+		busy = true;
+	}
+
 	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
-		if (!is_page_busy(page))
+		if (!busy)
 			dma_free_coherent(pool->dev, pool->allocation,
 					  page->vaddr, page->dma);
-		else
-			dev_err(pool->dev, "%s %s, %p busy\n", __func__,
-				pool->name, page->vaddr);
 		list_del(&page->page_list);
 		kfree(page);
 	}
@@ -282,18 +299,18 @@  void dma_pool_destroy(struct dma_pool *pool)
 }
 EXPORT_SYMBOL(dma_pool_destroy);
 
-static inline void pool_check_block(struct dma_pool *pool, void *retval,
-				    unsigned int offset, gfp_t mem_flags)
+static inline void pool_check_block(struct dma_pool *pool, struct dma_block *block,
+				    gfp_t mem_flags)
 {
-#ifdef	DMAPOOL_DEBUG
+#ifdef DMAPOOL_DEBUG
+	u8 *data = block;
 	int i;
-	u8 *data = retval;
-	/* page->offset is stored in first 4 bytes */
-	for (i = sizeof(offset); i < pool->size; i++) {
+
+	for (i = sizeof(struct dma_block); i < pool->size; i++) {
 		if (data[i] == POOL_POISON_FREED)
 			continue;
-		dev_err(pool->dev, "%s %s, %p (corrupted)\n",
-			__func__, pool->name, retval);
+		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
+			pool->name, block);
 
 		/*
 		 * Dump the first 4 bytes even if they are not
@@ -303,8 +320,9 @@  static inline void pool_check_block(struct dma_pool *pool, void *retval,
 				data, pool->size, 1);
 		break;
 	}
+
 	if (!want_init_on_alloc(mem_flags))
-		memset(retval, POOL_POISON_ALLOCATED, pool->size);
+		memset(block, POOL_POISON_ALLOCATED, pool->size);
 #endif
 }
 
@@ -321,44 +339,41 @@  static inline void pool_check_block(struct dma_pool *pool, void *retval,
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		     dma_addr_t *handle)
 {
-	unsigned long flags;
+	struct dma_block *block;
 	struct dma_page *page;
-	unsigned int offset;
-	void *retval;
+	unsigned long flags;
 
 	might_alloc(mem_flags);
 
 	spin_lock_irqsave(&pool->lock, flags);
-	list_for_each_entry(page, &pool->page_list, page_list) {
-		if (page->offset < pool->allocation)
-			goto ready;
-	}
-
-	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
-	spin_unlock_irqrestore(&pool->lock, flags);
+	block = pool_block_pop(pool);
+	if (!block) {
+		/*
+		 * pool_alloc_page() might sleep, so temporarily drop
+		 * &pool->lock
+		 */
+		spin_unlock_irqrestore(&pool->lock, flags);
 
-	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
-	if (!page)
-		return NULL;
+		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
+		if (!page)
+			return NULL;
 
-	spin_lock_irqsave(&pool->lock, flags);
-	pool_initialise_page(pool, page);
- ready:
-	page->in_use++;
-	offset = page->offset;
-	page->offset = *(int *)(page->vaddr + offset);
-	retval = offset + page->vaddr;
-	*handle = offset + page->dma;
-	pool_check_block(pool, retval, offset, mem_flags);
+		spin_lock_irqsave(&pool->lock, flags);
+		pool_initialise_page(pool, page);
+		block = pool_block_pop(pool);
+	}
 	spin_unlock_irqrestore(&pool->lock, flags);
 
+	*handle = block->dma;
+	pool_check_block(pool, block, mem_flags);
 	if (want_init_on_alloc(mem_flags))
-		memset(retval, 0, pool->size);
+		memset(block, 0, pool->size);
 
-	return retval;
+	return block;
 }
 EXPORT_SYMBOL(dma_pool_alloc);
 
+#ifdef DMAPOOL_DEBUG
 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
 {
 	struct dma_page *page;
@@ -372,33 +387,35 @@  static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
 	return NULL;
 }
 
-#ifdef DMAPOOL_DEBUG
-static inline bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
-				 void *vaddr)
+static inline bool pool_block_err(struct dma_pool *pool, void *vaddr,
+				  dma_addr_t dma)
 {
-	unsigned int chain = page->offset;
+	struct dma_block *block = pool->next_block;
+	struct dma_page *page;
 
-	if ((dma - page->dma) != offset) {
-		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
+	page = pool_find_page(pool, dma);
+	if (!page) {
+		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
 			__func__, pool->name, vaddr, &dma);
 		return true;
 	}
 
-	while (chain < pool->allocation) {
-		if (chain != offset) {
-			chain = *(int *)(page->vaddr + chain);
+	while (block) {
+		if (block != vaddr) {
+			block = block->next_block;
 			continue;
 		}
 		dev_err(pool->dev, "%s %s, dma %pad already free\n",
 			__func__, pool->name, &dma);
 		return true;
 	}
+
 	memset(vaddr, POOL_POISON_FREED, pool->size);
 	return false;
 }
 #else
-static inline bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
-				 void *vaddr)
+static inline bool pool_block_err(struct dma_pool *pool, void *vaddr,
+				  dma_addr_t dma)
 {
 	if (want_init_on_free())
 		memset(vaddr, 0, pool->size);
@@ -417,28 +434,14 @@  static inline bool pool_page_err(struct dma_pool *pool, struct dma_page *page,
  */
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 {
-	struct dma_page *page;
+	struct dma_block *block = vaddr;
 	unsigned long flags;
-	unsigned int offset;
 
 	spin_lock_irqsave(&pool->lock, flags);
-	page = pool_find_page(pool, dma);
-	if (!page) {
-		spin_unlock_irqrestore(&pool->lock, flags);
-		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
-			__func__, pool->name, vaddr, &dma);
-		return;
+	if (!pool_block_err(pool, vaddr, dma)) {
+		pool_block_push(pool, block, dma);
+		pool->nr_active--;
 	}
-
-	offset = vaddr - page->vaddr;
-	if (pool_page_err(pool, page, vaddr)) {
-		spin_unlock_irqrestore(&pool->lock, flags);
-		return;
-	}
-
-	page->in_use--;
-	*(int *)vaddr = page->offset;
-	page->offset = offset;
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
 EXPORT_SYMBOL(dma_pool_free);