netfs: Define an interface to talk to a cache
Add an interface to the netfs helper library for reading data from the
cache instead of downloading it from the server, and support for writing
data just downloaded or cleared to the cache.

The API passes an iov_iter to the cache read/write routines to indicate the
data/buffer to be used.  This is done using the ITER_XARRAY type to provide
direct access to the netfs inode's pagecache.
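
As an illustrative sketch (start, len, rreq and subreq are assumed to be in
scope; these are the same calls the helpers below make), a span of the
inode's pagecache is handed to the cache like this:

        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct iov_iter iter;

        /* Describe bytes [start, start + len) of the pagecache without
         * copying them.
         */
        iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, start, len);

        /* The cache backend reads directly into those pages and invokes the
         * termination callback when it has finished.
         */
        cres->ops->read(cres, start, &iter, false,
                        netfs_cache_read_terminated, subreq);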

When the netfs's ->begin_cache_operation() method is called, it must fill
in the cache_resources in the netfs_read_request struct, including the
netfs_cache_ops table used by the helper library to talk to the cache.  The
helper library does not access the cache directly.

Changes:
v6:
- Call trace_netfs_read() after beginning the cache op so that the cookie
  debug ID can be logged[3].
- Don't record the error from writing to the cache.  We don't want to pass
  it back to the netfs[4].
- Fix copy-to-cache subreq amalgamation to not round up as it goes along
  otherwise it overcalculates the length of the write[5].

v5:
- Use end_page_fscache() rather than unlock_page_fscache()[2].

v4:
- Added a flag to netfs_subreq_terminated() to indicate that the caller may
  have been running async and that anything which might sleep needs punting
  to a workqueue (can't use in_softirq()[1]).
- Add missing inc of netfs_n_rh_read stat.
- Move initial definition of fscache_begin_read_operation() elsewhere.
- Need to call op->begin_cache_operation() from netfs_write_begin().

Signed-off-by: David Howells <[email protected]>
Reviewed-and-tested-by: Jeff Layton <[email protected]>
Tested-by: Dave Wysochanski <[email protected]>
Tested-by: Marc Dionne <[email protected]>
cc: Matthew Wilcox <[email protected]>
cc: [email protected]
cc: [email protected]
cc: [email protected]
cc: [email protected]
cc: [email protected]
cc: [email protected]
cc: [email protected]
cc: [email protected]
Link: https://lore.kernel.org/r/[email protected]/ [1]
Link: https://lore.kernel.org/r/[email protected]/ [2]
Link: https://lore.kernel.org/r/161781045123.463527.14533348855710902201.stgit@warthog.procyon.org.uk/ [3]
Link: https://lore.kernel.org/r/161781046256.463527.18158681600085556192.stgit@warthog.procyon.org.uk/ [4]
Link: https://lore.kernel.org/r/161781047695.463527.7463536103593997492.stgit@warthog.procyon.org.uk/ [5]
Link: https://lore.kernel.org/r/161118141321.1232039.8296910406755622458.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161161036700.2537118.11170748455436854978.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/161340399569.1303470.1138884774643385730.stgit@warthog.procyon.org.uk/ # v3
Link: https://lore.kernel.org/r/161539542874.286939.13337898213448136687.stgit@warthog.procyon.org.uk/ # v4
Link: https://lore.kernel.org/r/161653799826.2770958.9015430297426331950.stgit@warthog.procyon.org.uk/ # v5
Link: https://lore.kernel.org/r/161789081462.6155.3853904866933313256.stgit@warthog.procyon.org.uk/ # v6
dhowells committed Apr 23, 2021
1 parent e1b1240 commit 726218f
Showing 3 changed files with 295 additions and 1 deletion.
239 changes: 238 additions & 1 deletion fs/netfs/read_helper.c
@@ -88,6 +88,8 @@ static void netfs_free_read_request(struct work_struct *work)
        if (rreq->netfs_priv)
                rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
        trace_netfs_rreq(rreq, netfs_rreq_trace_free);
        if (rreq->cache_resources.ops)
                rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
        kfree(rreq);
        netfs_stat_d(&netfs_n_rh_rreq);
}
@@ -154,6 +156,34 @@ static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
        iov_iter_zero(iov_iter_count(&iter), &iter);
}

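/*
 * Handle completion of a read from the cache: pass the result on to the
 * common subrequest termination path.
 */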
static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
                                        bool was_async)
{
        struct netfs_read_subrequest *subreq = priv;

        netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_from_cache(struct netfs_read_request *rreq,
                                  struct netfs_read_subrequest *subreq,
                                  bool seek_data)
{
        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct iov_iter iter;

        netfs_stat(&netfs_n_rh_read);
        iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
                        subreq->start + subreq->transferred,
                        subreq->len - subreq->transferred);

        cres->ops->read(cres, subreq->start, &iter, seek_data,
                        netfs_cache_read_terminated, subreq);
}

/*
 * Fill a subrequest region with zeroes.
 */
@@ -198,6 +228,141 @@ static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async)
        netfs_put_read_request(rreq, was_async);
}

/*
 * Deal with the completion of writing the data to the cache.  We have to clear
 * the PG_fscache bits on the pages involved and release the caller's ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */
static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
                                          bool was_async)
{
        struct netfs_read_subrequest *subreq;
        struct page *page;
        pgoff_t unlocked = 0;
        bool have_unlocked = false;

        rcu_read_lock();

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

                xas_for_each(&xas, page, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
                        /* We might have multiple writes from the same huge
                         * page, but we mustn't unlock a page more than once.
                         */
                        if (have_unlocked && page->index <= unlocked)
                                continue;
                        unlocked = page->index;
                        end_page_fscache(page);
                        have_unlocked = true;
                }
        }

        rcu_read_unlock();
        netfs_rreq_completed(rreq, was_async);
}

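/*
 * Handle completion of a write to the cache: update the stats and, once the
 * last outstanding write has finished, clear the PG_fscache marks.
 */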
static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
                                       bool was_async)
{
        struct netfs_read_subrequest *subreq = priv;
        struct netfs_read_request *rreq = subreq->rreq;

        if (IS_ERR_VALUE(transferred_or_error)) {
                netfs_stat(&netfs_n_rh_write_failed);
        } else {
                netfs_stat(&netfs_n_rh_write_done);
        }

        trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

        /* If we decrement nr_wr_ops to 0, the ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_wr_ops))
                netfs_rreq_unmark_after_write(rreq, was_async);

        netfs_put_subrequest(subreq, was_async);
}

/*
 * Perform any outstanding writes to the cache.  We inherit a ref from the
 * caller.
 */
static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
{
        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct netfs_read_subrequest *subreq, *next, *p;
        struct iov_iter iter;
        int ret;

        trace_netfs_rreq(rreq, netfs_rreq_trace_write);

        /* We don't want terminating writes trying to wake us up whilst we're
         * still going through the list.
         */
        atomic_inc(&rreq->nr_wr_ops);

        list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
                if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
                        list_del_init(&subreq->rreq_link);
                        netfs_put_subrequest(subreq, false);
                }
        }

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                /* Amalgamate adjacent writes */
                while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
                        next = list_next_entry(subreq, rreq_link);
                        if (next->start != subreq->start + subreq->len)
                                break;
                        subreq->len += next->len;
                        list_del_init(&next->rreq_link);
                        netfs_put_subrequest(next, false);
                }

                ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
                                               rreq->i_size);
                if (ret < 0) {
                        trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
                        continue;
                }

                iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
                                subreq->start, subreq->len);

                atomic_inc(&rreq->nr_wr_ops);
                netfs_stat(&netfs_n_rh_write);
                netfs_get_read_subrequest(subreq);
                trace_netfs_sreq(subreq, netfs_sreq_trace_write);
                cres->ops->write(cres, subreq->start, &iter,
                                 netfs_rreq_copy_terminated, subreq);
        }

        /* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_wr_ops))
                netfs_rreq_unmark_after_write(rreq, false);
}

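/*
 * Work item by which copying to the cache is deferred to a worker thread.
 */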
static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
        struct netfs_read_request *rreq =
                container_of(work, struct netfs_read_request, work);

        netfs_rreq_do_write_to_cache(rreq);
}

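/*
 * Start copying downloaded data to the cache, punting to a worker thread if
 * we're currently in an async (possibly softirq) context.
 */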
static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
                                      bool was_async)
{
        if (was_async) {
                rreq->work.func = netfs_rreq_write_to_cache_work;
                if (!queue_work(system_unbound_wq, &rreq->work))
                        BUG();
        } else {
                netfs_rreq_do_write_to_cache(rreq);
        }
}

/*
 * Unlock the pages in a read operation.  We need to set PG_fscache on any
 * pages we're going to write back before we unlock them.
@@ -299,7 +464,10 @@ static void netfs_rreq_short_read(struct netfs_read_request *rreq,

        netfs_get_read_subrequest(subreq);
        atomic_inc(&rreq->nr_rd_ops);
-       netfs_read_from_server(rreq, subreq);
+       if (subreq->source == NETFS_READ_FROM_CACHE)
+               netfs_read_from_cache(rreq, subreq, true);
+       else
+               netfs_read_from_server(rreq, subreq);
}

/*
@@ -344,6 +512,25 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq)
        return false;
}

/*
 * Check to see if the data read is still valid.
 */
static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq)
{
        struct netfs_read_subrequest *subreq;

        if (!rreq->netfs_ops->is_still_valid ||
            rreq->netfs_ops->is_still_valid(rreq))
                return;

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                if (subreq->source == NETFS_READ_FROM_CACHE) {
                        subreq->error = -ESTALE;
                        __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
                }
        }
}

/*
 * Assess the state of a read request and decide what to do next.
 *
@@ -355,6 +542,8 @@ static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
        trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
        netfs_rreq_is_still_valid(rreq);

        if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
            test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
                if (netfs_rreq_perform_resubmissions(rreq))
@@ -367,6 +556,9 @@ static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
        clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
        wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

        if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
                return netfs_rreq_write_to_cache(rreq, was_async);

        netfs_rreq_completed(rreq, was_async);
}

@@ -504,7 +696,10 @@ static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequ
                                                       loff_t i_size)
{
        struct netfs_read_request *rreq = subreq->rreq;
        struct netfs_cache_resources *cres = &rreq->cache_resources;

        if (cres->ops)
                return cres->ops->prepare_read(subreq, i_size);
        if (subreq->start >= rreq->i_size)
                return NETFS_FILL_WITH_ZEROES;
        return NETFS_DOWNLOAD_FROM_SERVER;
@@ -595,6 +790,9 @@ static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
        case NETFS_DOWNLOAD_FROM_SERVER:
                netfs_read_from_server(rreq, subreq);
                break;
        case NETFS_READ_FROM_CACHE:
                netfs_read_from_cache(rreq, subreq, false);
                break;
        default:
                BUG();
        }
@@ -607,9 +805,23 @@ static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
        return false;
}

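/*
 * Give the cache (if one is attached) the opportunity to widen the proposed
 * readahead window.
 */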
static void netfs_cache_expand_readahead(struct netfs_read_request *rreq,
                                         loff_t *_start, size_t *_len, loff_t i_size)
{
        struct netfs_cache_resources *cres = &rreq->cache_resources;

        if (cres->ops && cres->ops->expand_readahead)
                cres->ops->expand_readahead(cres, _start, _len, i_size);
}

static void netfs_rreq_expand(struct netfs_read_request *rreq,
                              struct readahead_control *ractl)
{
        /* Give the cache a chance to change the request parameters.  The
         * resultant request must contain the original region.
         */
        netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

        /* Give the netfs a chance to change the request parameters.  The
         * resultant request must contain the original region.
         */
@@ -661,6 +873,7 @@ void netfs_readahead(struct readahead_control *ractl,
        struct netfs_read_request *rreq;
        struct page *page;
        unsigned int debug_index = 0;
        int ret;

        _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

@@ -674,6 +887,12 @@ void netfs_readahead(struct readahead_control *ractl,
        rreq->start = readahead_pos(ractl);
        rreq->len = readahead_length(ractl);

        if (ops->begin_cache_operation) {
                ret = ops->begin_cache_operation(rreq);
                if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
                        goto cleanup_free;
        }

        netfs_stat(&netfs_n_rh_readahead);
        trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
                         netfs_read_trace_readahead);
@@ -698,6 +917,9 @@ void netfs_readahead(struct readahead_control *ractl,
        netfs_rreq_assess(rreq, false);
        return;

cleanup_free:
        netfs_put_read_request(rreq, false);
        return;
cleanup:
        if (netfs_priv)
                ops->cleanup(ractl->mapping, netfs_priv);
@@ -744,6 +966,14 @@ int netfs_readpage(struct file *file,
        rreq->start = page_index(page) * PAGE_SIZE;
        rreq->len = thp_size(page);

        if (ops->begin_cache_operation) {
                ret = ops->begin_cache_operation(rreq);
                if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
                        unlock_page(page);
                        goto out;
                }
        }

        netfs_stat(&netfs_n_rh_readpage);
        trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);

@@ -768,6 +998,7 @@ int netfs_readpage(struct file *file,
        ret = rreq->error;
        if (ret == 0 && rreq->submitted < rreq->len)
                ret = -EIO;
out:
        netfs_put_read_request(rreq, false);
        return ret;
}
@@ -873,6 +1104,12 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
        __set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags);
        netfs_priv = NULL;

        if (ops->begin_cache_operation) {
                ret = ops->begin_cache_operation(rreq);
                if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
                        goto error_put;
        }

        netfs_stat(&netfs_n_rh_write_begin);
        trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
