meminfo: provide estimated per-node available memory
Commit Message
Provide an estimate of each node's available memory, in addition to the
system-wide available memory already provided by /proc/meminfo.
Like commit 34e431b0ae39 ("/proc/meminfo: provide estimated available
memory"), it is more convenient to provide such an estimate in
/sys/bus/node/devices/nodeX/meminfo: if things change in the future, we
only have to change it in one place.
Example output:
/sys/bus/node/devices/node1/meminfo:
Node 1 MemTotal: 4084480 kB
Node 1 MemFree: 3348820 kB
Node 1 MemAvailable: 3647972 kB
Node 1 MemUsed: 735660 kB
...
Link: https://github.com/numactl/numactl/issues/210
Signed-off-by: Chunsheng Luo <luochunsheng@ustc.edu>
---
drivers/base/node.c | 4 ++++
include/linux/mm.h | 1 +
mm/show_mem.c | 43 +++++++++++++++++++++++++++++++++++++++++++
3 files changed, 48 insertions(+)
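[Editor's note: as a usage illustration only, not part of the patch, a
userspace consumer such as the numactl use case linked above could read the
new field roughly as sketched below; the helper name and error handling are
made up for this sketch.]

#include <stdio.h>

/* Parse "Node <nid> MemAvailable: <n> kB" from the proposed sysfs file.
 * Returns the value in kB, or -1 if the file or the field is missing.
 */
static long node_mem_available_kb(int nid)
{
	char path[64], line[256];
	long kb = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/bus/node/devices/node%d/meminfo", nid);
	f = fopen(path, "r");
	if (!f)
		return -1;

	while (fgets(line, sizeof(line), f)) {
		/* Only the MemAvailable line matches this format string. */
		if (sscanf(line, "Node %*d MemAvailable: %ld kB", &kb) == 1)
			break;
	}
	fclose(f);
	return kb;
}

int main(void)
{
	printf("node 1 MemAvailable: %ld kB\n", node_mem_available_kb(1));
	return 0;
}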
Comments
Chunsheng Luo <luochunsheng@ustc.edu> writes:
> + available = sum_zone_node_page_state(nid, NR_FREE_PAGES) - pgdat->totalreserve_pages;
> +
> + /*
> + * Not all the page cache can be freed, otherwise the system will
> + * start swapping or thrashing. Assume at least half of the page
> + * cache, or the low watermark worth of cache, needs to stay.
> + */
> + pagecache = node_page_state(pgdat, NR_ACTIVE_FILE) +
> + node_page_state(pgdat, NR_INACTIVE_FILE);
> + pagecache -= min(pagecache / 2, wmark_low);
The magic number 2 should be a define (or maybe even a tunable); similar
below. It seems quite arbitrary, but I don't have a better solution
either. Maybe dirty pages could be handled differently, but nothing stands
out here.
> + node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE);
> + reclaimable -= min(reclaimable / 2, wmark_low);
> + available += reclaimable;
> +
> + if (available < 0)
> + available = 0;
That would be a bug? Perhaps add a WARN_ON
With those changes:
Reviewed-by: Andi Kleen <ak@linux.intel.com>
-Andi
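[Editor's note: for illustration, the tail of si_mem_node_available() with
both review suggestions applied might look roughly like the sketch below.
UNRECLAIMABLE_CACHE_DIV is a made-up name and WARN_ON_ONCE() is just one way
to realize the suggested WARN_ON; treat this as a sketch, not the actual
follow-up.]

#define UNRECLAIMABLE_CACHE_DIV	2	/* hypothetical name for the magic 2 */

long si_mem_node_available(int nid)
{
	...	/* wmark_low and the free-pages computation as in the patch below */

	/*
	 * Assume at least 1/UNRECLAIMABLE_CACHE_DIV of the page cache, or the
	 * low watermark worth of cache, needs to stay.
	 */
	pagecache = node_page_state(pgdat, NR_ACTIVE_FILE) +
		    node_page_state(pgdat, NR_INACTIVE_FILE);
	pagecache -= min(pagecache / UNRECLAIMABLE_CACHE_DIV, wmark_low);
	available += pagecache;

	reclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) +
		      node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE);
	reclaimable -= min(reclaimable / UNRECLAIMABLE_CACHE_DIV, wmark_low);
	available += reclaimable;

	/* Flag the negative case, as suggested in review, instead of silently clamping. */
	if (WARN_ON_ONCE(available < 0))
		available = 0;

	return available;
}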
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -372,11 +372,13 @@ static ssize_t node_read_meminfo(struct device *dev,
int len = 0;
int nid = dev->id;
struct pglist_data *pgdat = NODE_DATA(nid);
+ long available;
struct sysinfo i;
unsigned long sreclaimable, sunreclaimable;
unsigned long swapcached = 0;
si_meminfo_node(&i, nid);
+ available = si_mem_node_available(nid);
sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
#ifdef CONFIG_SWAP
@@ -385,6 +387,7 @@ static ssize_t node_read_meminfo(struct device *dev,
len = sysfs_emit_at(buf, len,
"Node %d MemTotal: %8lu kB\n"
"Node %d MemFree: %8lu kB\n"
+ "Node %d MemAvailable: %8lu kB\n"
"Node %d MemUsed: %8lu kB\n"
"Node %d SwapCached: %8lu kB\n"
"Node %d Active: %8lu kB\n"
@@ -397,6 +400,7 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d Mlocked: %8lu kB\n",
nid, K(i.totalram),
nid, K(i.freeram),
+ nid, K(available),
nid, K(i.totalram - i.freeram),
nid, K(swapcached),
nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3202,6 +3202,7 @@ static inline void show_mem(void)
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
+extern long si_mem_node_available(int nid);
#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
extern unsigned long arch_reserved_kernel_pages(void);
#endif
--- a/mm/show_mem.c
+++ b/mm/show_mem.c
@@ -86,6 +86,49 @@ void si_meminfo(struct sysinfo *val)
EXPORT_SYMBOL(si_meminfo);
#ifdef CONFIG_NUMA
+long si_mem_node_available(int nid)
+{
+ int zone_type;
+ long available;
+ unsigned long pagecache;
+ unsigned long wmark_low = 0;
+ unsigned long reclaimable;
+ pg_data_t *pgdat = NODE_DATA(nid);
+
+ for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
+ wmark_low += low_wmark_pages((&pgdat->node_zones[zone_type]));
+
+ /*
+ * Estimate the amount of memory available for userspace allocations,
+ * without causing swapping for mbind process.
+ */
+ available = sum_zone_node_page_state(nid, NR_FREE_PAGES) - pgdat->totalreserve_pages;
+
+ /*
+ * Not all the page cache can be freed, otherwise the system will
+ * start swapping or thrashing. Assume at least half of the page
+ * cache, or the low watermark worth of cache, needs to stay.
+ */
+ pagecache = node_page_state(pgdat, NR_ACTIVE_FILE) +
+ node_page_state(pgdat, NR_INACTIVE_FILE);
+ pagecache -= min(pagecache / 2, wmark_low);
+ available += pagecache;
+
+ /*
+ * Part of the reclaimable slab and other kernel memory consists of
+ * items that are in use, and cannot be freed. Cap this estimate at the
+ * low watermark.
+ */
+ reclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) +
+ node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE);
+ reclaimable -= min(reclaimable / 2, wmark_low);
+ available += reclaimable;
+
+ if (available < 0)
+ available = 0;
+ return available;
+}
+
void si_meminfo_node(struct sysinfo *val, int nid)
{
int zone_type; /* needs to be signed */