From: Dave Chinner <dchinner@redhat.com>
Because we are going to change how the structure is laid out to
support RTPREEMPT and LOCKDEP, just assuming that the hash table is
allocated as zeroed memory is no longer sufficient to initialise
a hash-bl table.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
fs/dcache.c | 21 ++++++++++++++++++++-
fs/fscache/cookie.c | 8 ++++++++
fs/fscache/internal.h | 6 ++++--
fs/fscache/main.c | 3 +++
fs/fscache/volume.c | 8 ++++++++
fs/inode.c | 19 ++++++++++++++++++-
6 files changed, 61 insertions(+), 4 deletions(-)
On Wed, Dec 06, 2023 at 05:05:38PM +1100, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
>
> Because we are going to change how the structure is laid out to
> support RTPREEMPT and LOCKDEP, just assuming that the hash table is
> allocated as zeroed memory is no longer sufficient to initialise
> a hash-bl table.
static inline void init_bl_hash(struct hlist_bl_head *table, int shift)?
@@ -3284,7 +3284,10 @@ __setup("dhash_entries=", set_dhash_entries);
static void __init dcache_init_early(void)
{
- /* If hashes are distributed across NUMA nodes, defer
+ int i;
+
+ /*
+ * If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available.
*/
if (hashdist)
@@ -3300,11 +3303,20 @@ static void __init dcache_init_early(void)
NULL,
0,
0);
+ /*
+ * The value returned in d_hash_shift tells us the size of the
+ * hash table that was allocated as a log2 value.
+ */
+ for (i = 0; i < (1 << d_hash_shift); i++)
+ INIT_HLIST_BL_HEAD(&dentry_hashtable[i]);
+
d_hash_shift = 32 - d_hash_shift;
}
static void __init dcache_init(void)
{
+ int i;
+
/*
* A constructor could be added for stable state like the lists,
* but it is probably not worth it because of the cache nature
@@ -3328,6 +3340,13 @@ static void __init dcache_init(void)
NULL,
0,
0);
+ /*
+ * The value returned in d_hash_shift tells us the size of the
+ * hash table that was allocated as a log2 value.
+ */
+ for (i = 0; i < (1 << d_hash_shift); i++)
+ INIT_HLIST_BL_HEAD(&dentry_hashtable[i]);
+
d_hash_shift = 32 - d_hash_shift;
}
@@ -32,6 +32,14 @@ static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
static unsigned int fscache_lru_cookie_timeout = 10 * HZ;
+void fscache_cookie_hash_init(void)
+{
+ int i;
+
+ for (i = 0; i < (1 << fscache_cookie_hash_shift); i++)
+ INIT_HLIST_BL_HEAD(&fscache_cookie_hash[i]);
+}
+
void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
{
const u8 *k;
@@ -61,8 +61,9 @@ extern const struct seq_operations fscache_cookies_seq_ops;
#endif
extern struct timer_list fscache_cookie_lru_timer;
-extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
-extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
+void fscache_cookie_hash_init(void);
+void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
+bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
enum fscache_access_trace why);
static inline void fscache_see_cookie(struct fscache_cookie *cookie,
@@ -143,6 +144,7 @@ int fscache_stats_show(struct seq_file *m, void *v);
extern const struct seq_operations fscache_volumes_seq_ops;
#endif
+void fscache_volume_hash_init(void);
struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
enum fscache_volume_trace where);
void fscache_put_volume(struct fscache_volume *volume,
@@ -92,6 +92,9 @@ static int __init fscache_init(void)
goto error_cookie_jar;
}
+ fscache_volume_hash_init();
+ fscache_cookie_hash_init();
+
pr_notice("Loaded\n");
return 0;
@@ -17,6 +17,14 @@ static LIST_HEAD(fscache_volumes);
static void fscache_create_volume_work(struct work_struct *work);
+void fscache_volume_hash_init(void)
+{
+ int i;
+
+ for (i = 0; i < (1 << fscache_volume_hash_shift); i++)
+ INIT_HLIST_BL_HEAD(&fscache_volume_hash[i]);
+}
+
struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
enum fscache_volume_trace where)
{
@@ -2353,7 +2353,10 @@ __setup("ihash_entries=", set_ihash_entries);
*/
void __init inode_init_early(void)
{
- /* If hashes are distributed across NUMA nodes, defer
+ int i;
+
+ /*
+ * If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available.
*/
if (hashdist)
@@ -2369,10 +2372,18 @@ void __init inode_init_early(void)
&i_hash_mask,
0,
0);
+ /*
+ * The value returned in i_hash_shift tells us the size of the
+ * hash table that was allocated as a log2 value.
+ */
+ for (i = 0; i < (1 << i_hash_shift); i++)
+ INIT_HLIST_BL_HEAD(&inode_hashtable[i]);
}
void __init inode_init(void)
{
+ int i;
+
/* inode slab cache */
inode_cachep = kmem_cache_create("inode_cache",
sizeof(struct inode),
@@ -2395,6 +2406,12 @@ void __init inode_init(void)
&i_hash_mask,
0,
0);
+ /*
+ * The value returned in i_hash_shift tells us the size of the
+ * hash table that was allocated as a log2 value.
+ */
+ for (i = 0; i < (1 << i_hash_shift); i++)
+ INIT_HLIST_BL_HEAD(&inode_hashtable[i]);
}
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)