Diffstat (limited to 'include')

 include/linux/fs.h            |  24
 include/linux/fscache-cache.h |   4
 include/linux/fscache.h       |  50
 include/linux/netfs.h         | 234
 include/linux/pagemap.h       |  42
 include/linux/uio.h           |  10
 include/trace/events/netfs.h  | 261
 7 files changed, 596 insertions(+), 29 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5ac7bad3f0de..bf4e90d3ab18 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -892,18 +892,22 @@ struct fown_struct {
int signum; /* posix.1b rt signal to be delivered on IO */
};
-/*
- * Track a single file's readahead state
+/**
+ * struct file_ra_state - Track a file's readahead state.
+ * @start: Where the most recent readahead started.
+ * @size: Number of pages read in the most recent readahead.
+ * @async_size: Start next readahead when this many pages are left.
+ * @ra_pages: Maximum size of a readahead request.
+ * @mmap_miss: How many mmap accesses missed in the page cache.
+ * @prev_pos: The last byte in the most recent read request.
*/
struct file_ra_state {
- pgoff_t start; /* where readahead started */
- unsigned int size; /* # of readahead pages */
- unsigned int async_size; /* do asynchronous readahead when
- there are only # of pages ahead */
-
- unsigned int ra_pages; /* Maximum readahead window */
- unsigned int mmap_miss; /* Cache miss stat for mmap accesses */
- loff_t prev_pos; /* Cache last read() position */
+ pgoff_t start;
+ unsigned int size;
+ unsigned int async_size;
+ unsigned int ra_pages;
+ unsigned int mmap_miss;
+ loff_t prev_pos;
};
/*
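To make the window arithmetic above concrete: the page sitting @async_size pages before the end of the current window carries the readahead marker, and a hit on that page should kick off the next asynchronous readahead of at most @ra_pages pages. A minimal sketch, using a hypothetical helper name that is not part of this patch:

    /* Sketch only: ra_hits_async_marker() is an illustrative helper,
     * not kernel code.  The marker page sits async_size pages before
     * the end of the window that started at ra->start. */
    static bool ra_hits_async_marker(const struct file_ra_state *ra,
                                     pgoff_t index)
    {
            return index == ra->start + ra->size - ra->async_size;
    }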
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 3f0b19dcfae7..3235ddbdcc09 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -304,6 +304,10 @@ struct fscache_cache_ops {
/* dissociate a cache from all the pages it was backing */
void (*dissociate_pages)(struct fscache_cache *cache);
+
+ /* Begin a read operation for the netfs lib */
+ int (*begin_read_operation)(struct netfs_read_request *rreq,
+ struct fscache_retrieval *op);
};
extern struct fscache_cookie fscache_fsdef_index;
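A cache backend opts in to the netfs library by filling in this new method. A rough sketch for a hypothetical backend called mycache; the function and ops-table names are assumptions, and exactly what gets stashed in the cache resources is backend-specific:

    /* Assumed table of netfs_cache_ops, filled in elsewhere. */
    static const struct netfs_cache_ops mycache_netfs_cache_ops;

    static int mycache_begin_read_operation(struct netfs_read_request *rreq,
                                            struct fscache_retrieval *op)
    {
            /* Publish the cache ops on the request and stash whatever
             * state ->read()/->write() will later need. */
            rreq->cache_resources.ops        = &mycache_netfs_cache_ops;
            rreq->cache_resources.cache_priv = op;
            return 0;
    }

    static const struct fscache_cache_ops mycache_ops = {
            /* ... existing methods ... */
            .begin_read_operation = mycache_begin_read_operation,
    };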
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index a1c928fe98e7..abc1c4737fb8 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -19,6 +19,7 @@
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/list_bl.h>
+#include <linux/netfs.h>
#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
#define fscache_available() (1)
@@ -29,16 +30,6 @@
#endif
-/*
- * overload PG_private_2 to give us PG_fscache - this is used to indicate that
- * a page is currently backed by a local disk cache
- */
-#define PageFsCache(page) PagePrivate2((page))
-#define SetPageFsCache(page) SetPagePrivate2((page))
-#define ClearPageFsCache(page) ClearPagePrivate2((page))
-#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
-#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
-
/* pattern used to fill dead space in an index entry */
#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
@@ -46,6 +37,7 @@ struct pagevec;
struct fscache_cache_tag;
struct fscache_cookie;
struct fscache_netfs;
+struct netfs_read_request;
typedef void (*fscache_rw_complete_t)(struct page *page,
void *context,
@@ -200,6 +192,10 @@ extern void __fscache_update_cookie(struct fscache_cookie *, const void *);
extern int __fscache_attr_changed(struct fscache_cookie *);
extern void __fscache_invalidate(struct fscache_cookie *);
extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
+
+#ifdef FSCACHE_USE_NEW_IO_API
+extern int __fscache_begin_read_operation(struct netfs_read_request *, struct fscache_cookie *);
+#else
extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
struct page *,
fscache_rw_complete_t,
@@ -223,6 +219,8 @@ extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
struct inode *);
extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
struct list_head *pages);
+#endif /* FSCACHE_USE_NEW_IO_API */
+
extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool);
extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t,
bool (*)(void *), void *);
@@ -507,6 +505,36 @@ int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
return -ENOBUFS;
}
+#ifdef FSCACHE_USE_NEW_IO_API
+
+/**
+ * fscache_begin_read_operation - Begin a read operation for the netfs lib
+ * @rreq: The read request being undertaken
+ * @cookie: The cookie representing the cache object
+ *
+ * Begin a read operation on behalf of the netfs helper library. @rreq
+ * indicates the read request to which the operation state should be attached;
+ * @cookie indicates the cache object that will be accessed.
+ *
+ * This is intended to be called from the ->begin_cache_operation() netfs lib
+ * operation as implemented by the network filesystem.
+ *
+ * Returns:
+ * * 0 - Success
+ * * -ENOBUFS - No caching available
+ * * Other error code from the cache, such as -ENOMEM.
+ */
+static inline
+int fscache_begin_read_operation(struct netfs_read_request *rreq,
+ struct fscache_cookie *cookie)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ return __fscache_begin_read_operation(rreq, cookie);
+ return -ENOBUFS;
+}
+
+#else /* FSCACHE_USE_NEW_IO_API */
+
/**
* fscache_read_or_alloc_page - Read a page from the cache or allocate a block
* in which to store it
@@ -786,6 +814,8 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
__fscache_uncache_all_inode_pages(cookie, inode);
}
+#endif /* FSCACHE_USE_NEW_IO_API */
+
/**
* fscache_disable_cookie - Disable a cookie
* @cookie: The cookie representing the cache object
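As the kerneldoc above says, fscache_begin_read_operation() is meant to be called from the network filesystem's ->begin_cache_operation() hook. A minimal sketch for a hypothetical myfs, where myfs_inode, myfs_i() and the fscache cookie field are assumed names; an -ENOBUFS result simply leaves the request without a cache, so the helpers fall back to reading from the server:

    static int myfs_begin_cache_operation(struct netfs_read_request *rreq)
    {
            struct myfs_inode *mi = myfs_i(rreq->inode);    /* assumed */

            return fscache_begin_read_operation(rreq, mi->fscache);
    }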
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
new file mode 100644
index 000000000000..9062adfa2fb9
--- /dev/null
+++ b/include/linux/netfs.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Network filesystem support services.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See:
+ *
+ * Documentation/filesystems/netfs_library.rst
+ *
+ * for a description of the network filesystem interface declared here.
+ */
+
+#ifndef _LINUX_NETFS_H
+#define _LINUX_NETFS_H
+
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+/*
+ * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
+ * a page is currently backed by a local disk cache
+ */
+#define PageFsCache(page) PagePrivate2((page))
+#define SetPageFsCache(page) SetPagePrivate2((page))
+#define ClearPageFsCache(page) ClearPagePrivate2((page))
+#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
+#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
+
+/**
+ * set_page_fscache - Set PG_fscache on a page and take a ref
+ * @page: The page.
+ *
+ * Set the PG_fscache (PG_private_2) flag on a page and take the reference
+ * needed for the VM to handle its lifetime correctly. This sets the flag and
+ * takes the reference unconditionally, so care must be taken not to set the
+ * flag again if it's already set.
+ */
+static inline void set_page_fscache(struct page *page)
+{
+ set_page_private_2(page);
+}
+
+/**
+ * end_page_fscache - Clear PG_fscache and release any waiters
+ * @page: The page
+ *
+ * Clear the PG_fscache (PG_private_2) bit on a page and wake up any sleepers
+ * waiting for this. The page ref held for PG_private_2 being set is released.
+ *
+ * This is, for example, used when a netfs page is being written to a local
+ * disk cache, thereby allowing writes to the cache for the same page to be
+ * serialised.
+ */
+static inline void end_page_fscache(struct page *page)
+{
+ end_page_private_2(page);
+}
+
+/**
+ * wait_on_page_fscache - Wait for PG_fscache to be cleared on a page
+ * @page: The page to wait on
+ *
+ * Wait for PG_fscache (aka PG_private_2) to be cleared on a page.
+ */
+static inline void wait_on_page_fscache(struct page *page)
+{
+ wait_on_page_private_2(page);
+}
+
+/**
+ * wait_on_page_fscache_killable - Wait for PG_fscache to be cleared on a page
+ * @page: The page to wait on
+ *
+ * Wait for PG_fscache (aka PG_private_2) to be cleared on a page or until a
+ * fatal signal is received by the calling task.
+ *
+ * Return:
+ * - 0 if successful.
+ * - -EINTR if a fatal signal was encountered.
+ */
+static inline int wait_on_page_fscache_killable(struct page *page)
+{
+ return wait_on_page_private_2_killable(page);
+}
+
+enum netfs_read_source {
+ NETFS_FILL_WITH_ZEROES,
+ NETFS_DOWNLOAD_FROM_SERVER,
+ NETFS_READ_FROM_CACHE,
+ NETFS_INVALID_READ,
+} __mode(byte);
+
+typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
+ bool was_async);
+
+/*
+ * Resources required to do operations on a cache.
+ */
+struct netfs_cache_resources {
+ const struct netfs_cache_ops *ops;
+ void *cache_priv;
+ void *cache_priv2;
+};
+
+/*
+ * Descriptor for a single component subrequest.
+ */
+struct netfs_read_subrequest {
+ struct netfs_read_request *rreq; /* Supervising read request */
+ struct list_head rreq_link; /* Link in rreq->subrequests */
+ loff_t start; /* Where to start the I/O */
+ size_t len; /* Size of the I/O */
+ size_t transferred; /* Amount of data transferred */
+ refcount_t usage;
+ short error; /* 0 or error that occurred */
+ unsigned short debug_index; /* Index in list (for debugging output) */
+ enum netfs_read_source source; /* Where to read from */
+ unsigned long flags;
+#define NETFS_SREQ_WRITE_TO_CACHE 0 /* Set if should write to cache */
+#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
+#define NETFS_SREQ_SHORT_READ 2 /* Set if there was a short read from the cache */
+#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
+#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
+};
+
+/*
+ * Descriptor for a read helper request. This is used to make multiple I/O
+ * requests on a variety of sources and then stitch the result together.
+ */
+struct netfs_read_request {
+ struct work_struct work;
+ struct inode *inode; /* The file being accessed */
+ struct address_space *mapping; /* The mapping being accessed */
+ struct netfs_cache_resources cache_resources;
+ struct list_head subrequests; /* Requests to fetch I/O from disk or net */
+ void *netfs_priv; /* Private data for the netfs */
+ unsigned int debug_id;
+ unsigned int cookie_debug_id;
+ atomic_t nr_rd_ops; /* Number of read ops in progress */
+ atomic_t nr_wr_ops; /* Number of write ops in progress */
+ size_t submitted; /* Amount submitted for I/O so far */
+ size_t len; /* Length of the request */
+ short error; /* 0 or error that occurred */
+ loff_t i_size; /* Size of the file */
+ loff_t start; /* Start position */
+ pgoff_t no_unlock_page; /* Don't unlock this page after read */
+ refcount_t usage;
+ unsigned long flags;
+#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */
+#define NETFS_RREQ_WRITE_TO_CACHE 1 /* Need to write to the cache */
+#define NETFS_RREQ_NO_UNLOCK_PAGE 2 /* Don't unlock no_unlock_page on completion */
+#define NETFS_RREQ_DONT_UNLOCK_PAGES 3 /* Don't unlock the pages on completion */
+#define NETFS_RREQ_FAILED 4 /* The request failed */
+#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
+ const struct netfs_read_request_ops *netfs_ops;
+};
+
+/*
+ * Operations the network filesystem can/must provide to the helpers.
+ */
+struct netfs_read_request_ops {
+ bool (*is_cache_enabled)(struct inode *inode);
+ void (*init_rreq)(struct netfs_read_request *rreq, struct file *file);
+ int (*begin_cache_operation)(struct netfs_read_request *rreq);
+ void (*expand_readahead)(struct netfs_read_request *rreq);
+ bool (*clamp_length)(struct netfs_read_subrequest *subreq);
+ void (*issue_op)(struct netfs_read_subrequest *subreq);
+ bool (*is_still_valid)(struct netfs_read_request *rreq);
+ int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
+ struct page *page, void **_fsdata);
+ void (*done)(struct netfs_read_request *rreq);
+ void (*cleanup)(struct address_space *mapping, void *netfs_priv);
+};
+
+/*
+ * Table of operations for access to a cache. This is obtained by
+ * rreq->ops->begin_cache_operation().
+ */
+struct netfs_cache_ops {
+ /* End an operation */
+ void (*end_operation)(struct netfs_cache_resources *cres);
+
+ /* Read data from the cache */
+ int (*read)(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ bool seek_data,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv);
+
+ /* Write data to the cache */
+ int (*write)(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv);
+
+ /* Expand readahead request */
+ void (*expand_readahead)(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size);
+
+ /* Prepare a read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+ enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq,
+ loff_t i_size);
+
+ /* Prepare a write operation, working out what part of the write we can
+ * actually do.
+ */
+ int (*prepare_write)(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size);
+};
+
+struct readahead_control;
+extern void netfs_readahead(struct readahead_control *,
+ const struct netfs_read_request_ops *,
+ void *);
+extern int netfs_readpage(struct file *,
+ struct page *,
+ const struct netfs_read_request_ops *,
+ void *);
+extern int netfs_write_begin(struct file *, struct address_space *,
+ loff_t, unsigned int, unsigned int, struct page **,
+ void **,
+ const struct netfs_read_request_ops *,
+ void *);
+
+extern void netfs_subreq_terminated(struct netfs_read_subrequest *, ssize_t, bool);
+extern void netfs_stats_show(struct seq_file *);
+
+#endif /* _LINUX_NETFS_H */
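Putting the pieces together: a network filesystem supplies a netfs_read_request_ops table and routes its address_space operations through the helpers. A compressed sketch under the same assumed myfs names as above; ->issue_op starts the server I/O for one subrequest and must eventually report the outcome via netfs_subreq_terminated():

    static void myfs_req_issue_op(struct netfs_read_subrequest *subreq)
    {
            /* Start a server read of subreq->len bytes at subreq->start;
             * the completion path must call netfs_subreq_terminated()
             * with the byte count transferred or an error. */
            myfs_fetch_data_async(subreq);                  /* assumed */
    }

    static const struct netfs_read_request_ops myfs_req_ops = {
            .is_cache_enabled      = myfs_is_cache_enabled, /* assumed */
            .begin_cache_operation = myfs_begin_cache_operation,
            .issue_op              = myfs_req_issue_op,
    };

    static int myfs_readpage(struct file *file, struct page *page)
    {
            return netfs_readpage(file, page, &myfs_req_ops, NULL);
    }

    static void myfs_readahead(struct readahead_control *ractl)
    {
            netfs_readahead(ractl, &myfs_req_ops, NULL);
    }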
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4c1ffe1d5dac..4686f9ab0636 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -688,6 +688,26 @@ void wait_for_stable_page(struct page *page);
void page_endio(struct page *page, bool is_write, int err);
+/**
+ * set_page_private_2 - Set PG_private_2 on a page and take a ref
+ * @page: The page.
+ *
+ * Set the PG_private_2 flag on a page and take the reference needed for the VM
+ * to handle its lifetime correctly. This sets the flag and takes the
+ * reference unconditionally, so care must be taken not to set the flag again
+ * if it's already set.
+ */
+static inline void set_page_private_2(struct page *page)
+{
+ page = compound_head(page);
+ get_page(page);
+ SetPagePrivate2(page);
+}
+
+void end_page_private_2(struct page *page);
+void wait_on_page_private_2(struct page *page);
+int wait_on_page_private_2_killable(struct page *page);
+
/*
* Add an arbitrary waiter to a page's wait queue
*/
@@ -792,20 +812,23 @@ static inline int add_to_page_cache(struct page *page,
* @file: The file, used primarily by network filesystems for authentication.
* May be NULL if invoked internally by the filesystem.
* @mapping: Readahead this filesystem object.
+ * @ra: File readahead state. May be NULL.
*/
struct readahead_control {
struct file *file;
struct address_space *mapping;
+ struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
pgoff_t _index;
unsigned int _nr_pages;
unsigned int _batch_count;
};
-#define DEFINE_READAHEAD(rac, f, m, i) \
- struct readahead_control rac = { \
+#define DEFINE_READAHEAD(ractl, f, r, m, i) \
+ struct readahead_control ractl = { \
.file = f, \
.mapping = m, \
+ .ra = r, \
._index = i, \
}
@@ -813,10 +836,11 @@ struct readahead_control {
void page_cache_ra_unbounded(struct readahead_control *,
unsigned long nr_to_read, unsigned long lookahead_count);
-void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *,
+void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
+void page_cache_async_ra(struct readahead_control *, struct page *,
unsigned long req_count);
-void page_cache_async_ra(struct readahead_control *, struct file_ra_state *,
- struct page *, unsigned long req_count);
+void readahead_expand(struct readahead_control *ractl,
+ loff_t new_start, size_t new_len);
/**
* page_cache_sync_readahead - generic file readahead
@@ -836,8 +860,8 @@ void page_cache_sync_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *file, pgoff_t index,
unsigned long req_count)
{
- DEFINE_READAHEAD(ractl, file, mapping, index);
- page_cache_sync_ra(&ractl, ra, req_count);
+ DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+ page_cache_sync_ra(&ractl, req_count);
}
/**
@@ -859,8 +883,8 @@ void page_cache_async_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *file,
struct page *page, pgoff_t index, unsigned long req_count)
{
- DEFINE_READAHEAD(ractl, file, mapping, index);
- page_cache_async_ra(&ractl, ra, page, req_count);
+ DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+ page_cache_async_ra(&ractl, page, req_count);
}
/**
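The new readahead_expand() lets a caller widen a readahead request before the pages are taken; the netfs helpers use it to line requests up with cache granularity. A sketch of the arithmetic, assuming a hypothetical 256KiB cache granule (readahead_pos() and readahead_length() are existing helpers in this header):

    static void myfs_widen_readahead(struct readahead_control *ractl)
    {
            loff_t start  = readahead_pos(ractl);
            size_t len    = readahead_length(ractl);
            loff_t gstart = round_down(start, 256 * 1024);

            /* Grow the request to cover whole granules; the expansion
             * is best-effort and never shrinks the original range. */
            readahead_expand(ractl, gstart,
                             round_up(start + len, 256 * 1024) - gstart);
    }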
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 27ff8eb786dc..d3ec87706d75 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -24,6 +24,7 @@ enum iter_type {
ITER_BVEC = 16,
ITER_PIPE = 32,
ITER_DISCARD = 64,
+ ITER_XARRAY = 128,
};
struct iov_iter {
@@ -39,6 +40,7 @@ struct iov_iter {
const struct iovec *iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
+ struct xarray *xarray;
struct pipe_inode_info *pipe;
};
union {
@@ -47,6 +49,7 @@ struct iov_iter {
unsigned int head;
unsigned int start_head;
};
+ loff_t xarray_start;
};
};
@@ -80,6 +83,11 @@ static inline bool iov_iter_is_discard(const struct iov_iter *i)
return iov_iter_type(i) == ITER_DISCARD;
}
+static inline bool iov_iter_is_xarray(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_XARRAY;
+}
+
static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
return i->type & (READ | WRITE);
@@ -221,6 +229,8 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
+void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
+ loff_t start, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
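ITER_XARRAY describes a contiguous byte range backed by pages held in an xarray, which is exactly what a mapping's page cache is, so an I/O can be aimed straight at cached pages without building a bio_vec array. A sketch of setting one up for a read (the function name is illustrative):

    static void myfs_iter_over_pagecache(struct address_space *mapping,
                                         loff_t start, size_t len)
    {
            struct iov_iter iter;

            /* Cover [start, start + len) of the file with the pages
             * already present in the page cache's xarray. */
            iov_iter_xarray(&iter, READ, &mapping->i_pages, start, len);
            /* ... hand &iter to a cache ->read() op or the transport. */
    }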
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
new file mode 100644
index 000000000000..de1c64635e42
--- /dev/null
+++ b/include/trace/events/netfs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Network filesystem support module tracepoints
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM netfs
+
+#if !defined(_TRACE_NETFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NETFS_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Define enums for tracing information.
+ */
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+enum netfs_read_trace {
+ netfs_read_trace_expanded,
+ netfs_read_trace_readahead,
+ netfs_read_trace_readpage,
+ netfs_read_trace_write_begin,
+};
+
+enum netfs_rreq_trace {
+ netfs_rreq_trace_assess,
+ netfs_rreq_trace_done,
+ netfs_rreq_trace_free,
+ netfs_rreq_trace_resubmit,
+ netfs_rreq_trace_unlock,
+ netfs_rreq_trace_unmark,
+ netfs_rreq_trace_write,
+};
+
+enum netfs_sreq_trace {
+ netfs_sreq_trace_download_instead,
+ netfs_sreq_trace_free,
+ netfs_sreq_trace_prepare,
+ netfs_sreq_trace_resubmit_short,
+ netfs_sreq_trace_submit,
+ netfs_sreq_trace_terminated,
+ netfs_sreq_trace_write,
+ netfs_sreq_trace_write_skip,
+ netfs_sreq_trace_write_term,
+};
+
+enum netfs_failure {
+ netfs_fail_check_write_begin,
+ netfs_fail_copy_to_cache,
+ netfs_fail_read,
+ netfs_fail_short_readpage,
+ netfs_fail_short_write_begin,
+ netfs_fail_prepare_write,
+};
+
+#endif
+
+#define netfs_read_traces \
+ EM(netfs_read_trace_expanded, "EXPANDED ") \
+ EM(netfs_read_trace_readahead, "READAHEAD") \
+ EM(netfs_read_trace_readpage, "READPAGE ") \
+ E_(netfs_read_trace_write_begin, "WRITEBEGN")
+
+#define netfs_rreq_traces \
+ EM(netfs_rreq_trace_assess, "ASSESS") \
+ EM(netfs_rreq_trace_done, "DONE ") \
+ EM(netfs_rreq_trace_free, "FREE ") \
+ EM(netfs_rreq_trace_resubmit, "RESUBM") \
+ EM(netfs_rreq_trace_unlock, "UNLOCK") \
+ EM(netfs_rreq_trace_unmark, "UNMARK") \
+ E_(netfs_rreq_trace_write, "WRITE ")
+
+#define netfs_sreq_sources \
+ EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
+ EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \
+ EM(NETFS_READ_FROM_CACHE, "READ") \
+ E_(NETFS_INVALID_READ, "INVL") \
+
+#define netfs_sreq_traces \
+ EM(netfs_sreq_trace_download_instead, "RDOWN") \
+ EM(netfs_sreq_trace_free, "FREE ") \
+ EM(netfs_sreq_trace_prepare, "PREP ") \
+ EM(netfs_sreq_trace_resubmit_short, "SHORT") \
+ EM(netfs_sreq_trace_submit, "SUBMT") \
+ EM(netfs_sreq_trace_terminated, "TERM ") \
+ EM(netfs_sreq_trace_write, "WRITE") \
+ EM(netfs_sreq_trace_write_skip, "SKIP ") \
+ E_(netfs_sreq_trace_write_term, "WTERM")
+
+#define netfs_failures \
+ EM(netfs_fail_check_write_begin, "check-write-begin") \
+ EM(netfs_fail_copy_to_cache, "copy-to-cache") \
+ EM(netfs_fail_read, "read") \
+ EM(netfs_fail_short_readpage, "short-readpage") \
+ EM(netfs_fail_short_write_begin, "short-write-begin") \
+ E_(netfs_fail_prepare_write, "prep-write")
+
+
+/*
+ * Export enum symbols via userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+netfs_read_traces;
+netfs_rreq_traces;
+netfs_sreq_sources;
+netfs_sreq_traces;
+netfs_failures;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
+
+TRACE_EVENT(netfs_read,
+ TP_PROTO(struct netfs_read_request *rreq,
+ loff_t start, size_t len,
+ enum netfs_read_trace what),
+
+ TP_ARGS(rreq, start, len, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned int, cookie )
+ __field(loff_t, start )
+ __field(size_t, len )
+ __field(enum netfs_read_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq->debug_id;
+ __entry->cookie = rreq->cookie_debug_id;
+ __entry->start = start;
+ __entry->len = len;
+ __entry->what = what;
+ ),
+
+ TP_printk("R=%08x %s c=%08x s=%llx %zx",
+ __entry->rreq,
+ __print_symbolic(__entry->what, netfs_read_traces),
+ __entry->cookie,
+ __entry->start, __entry->len)
+ );
+
+TRACE_EVENT(netfs_rreq,
+ TP_PROTO(struct netfs_read_request *rreq,
+ enum netfs_rreq_trace what),
+
+ TP_ARGS(rreq, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned short, flags )
+ __field(enum netfs_rreq_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq->debug_id;
+ __entry->flags = rreq->flags;
+ __entry->what = what;
+ ),
+
+ TP_printk("R=%08x %s f=%02x",
+ __entry->rreq,
+ __print_symbolic(__entry->what, netfs_rreq_traces),
+ __entry->flags)
+ );
+
+TRACE_EVENT(netfs_sreq,
+ TP_PROTO(struct netfs_read_subrequest *sreq,
+ enum netfs_sreq_trace what),
+
+ TP_ARGS(sreq, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned short, index )
+ __field(short, error )
+ __field(unsigned short, flags )
+ __field(enum netfs_read_source, source )
+ __field(enum netfs_sreq_trace, what )
+ __field(size_t, len )
+ __field(size_t, transferred )
+ __field(loff_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = sreq->rreq->debug_id;
+ __entry->index = sreq->debug_index;
+ __entry->error = sreq->error;
+ __entry->flags = sreq->flags;
+ __entry->source = sreq->source;
+ __entry->what = what;
+ __entry->len = sreq->len;
+ __entry->transferred = sreq->transferred;
+ __entry->start = sreq->start;
+ ),
+
+ TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d",
+ __entry->rreq, __entry->index,
+ __print_symbolic(__entry->what, netfs_sreq_traces),
+ __print_symbolic(__entry->source, netfs_sreq_sources),
+ __entry->flags,
+ __entry->start, __entry->transferred, __entry->len,
+ __entry->error)
+ );
+
+TRACE_EVENT(netfs_failure,
+ TP_PROTO(struct netfs_read_request *rreq,
+ struct netfs_read_subrequest *sreq,
+ int error, enum netfs_failure what),
+
+ TP_ARGS(rreq, sreq, error, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned short, index )
+ __field(short, error )
+ __field(unsigned short, flags )
+ __field(enum netfs_read_source, source )
+ __field(enum netfs_failure, what )
+ __field(size_t, len )
+ __field(size_t, transferred )
+ __field(loff_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = rreq->debug_id;
+ __entry->index = sreq ? sreq->debug_index : 0;
+ __entry->error = error;
+ __entry->flags = sreq ? sreq->flags : 0;
+ __entry->source = sreq ? sreq->source : NETFS_INVALID_READ;
+ __entry->what = what;
+ __entry->len = sreq ? sreq->len : 0;
+ __entry->transferred = sreq ? sreq->transferred : 0;
+ __entry->start = sreq ? sreq->start : 0;
+ ),
+
+ TP_printk("R=%08x[%u] %s f=%02x s=%llx %zx/%zx %s e=%d",
+ __entry->rreq, __entry->index,
+ __print_symbolic(__entry->source, netfs_sreq_sources),
+ __entry->flags,
+ __entry->start, __entry->transferred, __entry->len,
+ __print_symbolic(__entry->what, netfs_failures),
+ __entry->error)
+ );
+
+#endif /* _TRACE_NETFS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
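For reference, TRACE_EVENT() generates a trace_netfs_*() call for each event above; the helper library emits them at the corresponding points, roughly like this (rreq, subreq and ret in scope inside the helpers):

    trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readahead);
    trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
    trace_netfs_failure(rreq, subreq, ret, netfs_fail_read);

Once compiled in, the events can be enabled at runtime through tracefs under events/netfs/.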