Decouple the flags used in prepare_read() from the netfs_io_subrequest
flags so that the raw fscache API stays neutral and independent of
libnetfs.  Currently only the COPY_TO_CACHE and ONDEMAND flags are
exposed to fscache, so introduce fscache variants of these two flags
(FSCACHE_REQ_COPY_TO_CACHE and FSCACHE_REQ_ONDEMAND) and leave the
other netfs_io_subrequest flags untouched.  Since the on-demand bit is
now carried in the fscache flag word, NETFS_SREQ_ONDEMAND can be
dropped from netfs_io_subrequest.

This is a cleanup with no change in logic.
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
---
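A quick illustration for reviewers (not part of the patch) of the calling
convention this enables for a raw fscache user; cres/start/len are
placeholder names, and the snippet just mirrors what the erofs and netfs
hunks below do:

	unsigned long flags = 1 << FSCACHE_REQ_ONDEMAND;
	enum fscache_io_source source;

	/*
	 * The flag word passed to ->prepare_read() is now a plain
	 * FSCACHE_REQ_* bitmap, so the caller no longer needs a
	 * struct netfs_io_subrequest just to carry flags.
	 */
	source = cres->ops->prepare_read(cres, &start, &len, &flags,
					 LLONG_MAX);
	if (test_bit(FSCACHE_REQ_COPY_TO_CACHE, &flags)) {
		/* The backend asks the caller to write the data back. */
	}

As the netfs hunk shows, libnetfs translates FSCACHE_REQ_COPY_TO_CACHE back
into NETFS_SREQ_COPY_TO_CACHE internally, so netfs-based filesystems see no
behavioural difference.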
 fs/cachefiles/io.c      | 10 +++++-----
 fs/erofs/fscache.c      |  5 +++--
 fs/netfs/io.c           | 14 ++++++++++----
 include/linux/fscache.h |  3 +++
 include/linux/netfs.h   |  1 -
 5 files changed, 21 insertions(+), 12 deletions(-)
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -415,9 +415,9 @@ static enum fscache_io_source cachefiles_prepare_read(struct fscache_resources *
}
if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
- __set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
+ __set_bit(FSCACHE_REQ_COPY_TO_CACHE, _flags);
why = cachefiles_trace_read_no_data;
- if (!test_bit(NETFS_SREQ_ONDEMAND, _flags))
+ if (!test_bit(FSCACHE_REQ_ONDEMAND, _flags))
goto out_no_object;
}
@@ -487,11 +487,11 @@ static enum fscache_io_source cachefiles_prepare_read(struct fscache_resources *
goto out;
download_and_store:
- __set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
- if (test_bit(NETFS_SREQ_ONDEMAND, _flags)) {
+ __set_bit(FSCACHE_REQ_COPY_TO_CACHE, _flags);
+ if (test_bit(FSCACHE_REQ_ONDEMAND, _flags)) {
rc = cachefiles_ondemand_read(object, start, len);
if (!rc) {
- __clear_bit(NETFS_SREQ_ONDEMAND, _flags);
+ __clear_bit(FSCACHE_REQ_ONDEMAND, _flags);
goto retry;
}
ret = FSCACHE_INVALID_READ;
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -148,6 +148,7 @@ static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
struct iov_iter iter;
loff_t start = rreq->start;
size_t len = rreq->len;
+ unsigned long flags;
size_t done = 0;
int ret;
@@ -172,12 +173,12 @@ static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
subreq->start = pstart + done;
subreq->len = len - done;
- subreq->flags = 1 << NETFS_SREQ_ONDEMAND;
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
+ flags = 1 << FSCACHE_REQ_ONDEMAND;
source = cres->ops->prepare_read(cres, &subreq->start,
- &subreq->len, &subreq->flags, LLONG_MAX);
+ &subreq->len, &flags, LLONG_MAX);
if (WARN_ON(subreq->len == 0))
source = FSCACHE_INVALID_READ;
if (source != FSCACHE_READ_FROM_CACHE) {
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -485,10 +485,16 @@ static enum fscache_io_source netfs_cache_prepare_read(struct netfs_io_subreques
{
struct netfs_io_request *rreq = subreq->rreq;
struct fscache_resources *cres = &rreq->cache_resources;
-
- if (cres->ops)
- return cres->ops->prepare_read(cres, &subreq->start,
- &subreq->len, &subreq->flags, i_size);
+ enum fscache_io_source source;
+ unsigned long flags = 0;
+
+ if (cres->ops) {
+ source = cres->ops->prepare_read(cres, &subreq->start,
+ &subreq->len, &flags, i_size);
+ if (test_bit(FSCACHE_REQ_COPY_TO_CACHE, &flags))
+ __set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
+ return source;
+ }
if (subreq->start >= rreq->i_size)
return FSCACHE_FILL_WITH_ZEROES;
return FSCACHE_DOWNLOAD_FROM_SERVER;
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -147,6 +147,9 @@ struct fscache_cookie {
};
};
+#define FSCACHE_REQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
+#define FSCACHE_REQ_ONDEMAND 1 /* Set if it's from on-demand read mode */
+
/*
* slow-path functions for when there is actually caching available, and the
* netfs does actually have a valid token
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -160,7 +160,6 @@ struct netfs_io_subrequest {
#define NETFS_SREQ_SHORT_IO 2 /* Set if the I/O was short */
#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
-#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
};
enum netfs_io_origin {