// SPDX-License-Identifier: GPL-2.0-or-later
/* Unbuffered and direct write support.
*
* Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/

#include <linux/export.h>
#include <linux/uio.h>
#include "internal.h"
static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
{
struct inode *inode = wreq->inode;
	unsigned long long end = wreq->start + wreq->transferred;

	if (!wreq->error &&
	    i_size_read(inode) < end) {
if (wreq->netfs_ops->update_i_size)
wreq->netfs_ops->update_i_size(inode, end);
else
i_size_write(inode, end);
}
}

/*
 * Perform an unbuffered write where we may have to do an RMW operation on an
 * encrypted file.  This can also be used for direct I/O writes.
 */
ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
struct netfs_group *netfs_group)
{
struct netfs_io_request *wreq;
unsigned long long start = iocb->ki_pos;
unsigned long long end = start + iov_iter_count(iter);
ssize_t ret, n;
size_t len = iov_iter_count(iter);
bool async = !is_sync_kiocb(iocb);
_enter("");
/* We're going to need a bounce buffer if what we transmit is going to
* be different in some way to the source buffer, e.g. because it gets
* encrypted/compressed or because it needs expanding to a block size.
*/
// TODO
_debug("uw %llx-%llx", start, end);
wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
iocb->ki_flags & IOCB_DIRECT ?
NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
if (IS_ERR(wreq))
return PTR_ERR(wreq);
wreq->io_streams[0].avail = true;
trace_netfs_write(wreq, (iocb->ki_flags & IOCB_DIRECT ?
netfs_write_trace_dio_write :
netfs_write_trace_unbuffered_write));
{
/* If this is an async op and we're not using a bounce buffer,
* we have to save the source buffer as the iterator is only
* good until we return. In such a case, extract an iterator
		 * to represent as much of the output buffer as we can
* manage. Note that the extraction might not be able to
* allocate a sufficiently large bvec array and may shorten the
* request.
*/
if (async || user_backed_iter(iter)) {
n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
if (n < 0) {
ret = n;
goto out;
}
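			/* Keep hold of the extracted bvec array and the pin
			 * state so that the pages can be unpinned and the
			 * array freed when the request is released.
			 */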
wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
wreq->direct_bv_count = n;
wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
} else {
wreq->iter = *iter;
}
wreq->io_iter = wreq->iter;
}
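
	/* Tell the I/O machinery to access the data through ->io_iter rather
	 * than by walking folios in the inode's pagecache.
	 */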
__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);

	/* Copy the data into the bounce buffer and encrypt it. */
	// TODO

	/* Dispatch the write. */
__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
if (async)
wreq->iocb = iocb;
wreq->cleanup = netfs_cleanup_dio_write;
ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), iov_iter_count(&wreq->io_iter));
if (ret < 0) {
_debug("begin = %zd", ret);
goto out;
}
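
	/* A synchronous write waits here for the request to complete and
	 * picks up the result; an asynchronous write returns -EIOCBQUEUED and
	 * the iocb is completed when the request finishes.
	 */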
if (!async) {
trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
TASK_UNINTERRUPTIBLE);
smp_rmb(); /* Read error/transferred after RIP flag */
ret = wreq->error;
if (ret == 0) {
ret = wreq->transferred;
iocb->ki_pos += ret;
}
} else {
ret = -EIOCBQUEUED;
}

out:
netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_write_iter_locked);

/**
* netfs_unbuffered_write_iter - Unbuffered write to a file
* @iocb: IO state structure
* @from: iov_iter with data to write
*
* Do an unbuffered write to a file, writing the data directly to the server
* and not lodging the data in the pagecache.
*
* Return:
 * * Negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
* * Number of bytes written, even for truncated writes
*/
ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
struct netfs_inode *ictx = netfs_inode(inode);
ssize_t ret;
loff_t pos = iocb->ki_pos;
unsigned long long end = pos + iov_iter_count(from) - 1;
_enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
if (!iov_iter_count(from))
return 0;
trace_netfs_write_iter(iocb, from);
netfs_stat(&netfs_n_wh_dio_write);
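
	/* Lock out buffered I/O for the duration; concurrent direct I/O on
	 * the same inode is still permitted.
	 */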
ret = netfs_start_io_direct(inode);
if (ret < 0)
return ret;
ret = generic_write_checks(iocb, from);
if (ret <= 0)
goto out;
ret = file_remove_privs(file);
if (ret < 0)
goto out;
ret = file_update_time(file);
if (ret < 0)
goto out;
if (iocb->ki_flags & IOCB_NOWAIT) {
/* We could block if there are any pages in the range. */
ret = -EAGAIN;
if (filemap_range_has_page(mapping, pos, end))
if (filemap_invalidate_inode(inode, true, pos, end))
goto out;
} else {
ret = filemap_write_and_wait_range(mapping, pos, end);
if (ret < 0)
goto out;
}

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached pages from the region we're
	 * about to write.  We do this *before* the write so that if we fail
	 * here we can return without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
ret = filemap_invalidate_inode(inode, true, pos, end);
if (ret < 0)
goto out;
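
	/* Data will now exist on the server up to the end of this write, so
	 * push out the point beyond which we assume the server holds nothing
	 * but zeros.
	 */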
end = iocb->ki_pos + iov_iter_count(from);
if (end > ictx->zero_point)
ictx->zero_point = end;
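
	/* A DIO write bypasses the local cache, so whatever the cache holds
	 * for this file is now stale.
	 */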
fscache_invalidate(netfs_i_cookie(ictx), NULL, i_size_read(inode),
FSCACHE_INVAL_DIO_WRITE);
ret = netfs_unbuffered_write_iter_locked(iocb, from, NULL);
out:
netfs_end_io_direct(inode);
return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_write_iter);
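
/*
 * Illustrative sketch, not part of this file: a network filesystem typically
 * reaches netfs_unbuffered_write_iter() from its ->write_iter() op, either
 * directly when IOCB_DIRECT is set or via netfs_file_write_iter(), which
 * itself dispatches here for direct writes.  For a hypothetical "myfs":
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *					    struct iov_iter *from)
 *	{
 *		if (iocb->ki_flags & IOCB_DIRECT)
 *			return netfs_unbuffered_write_iter(iocb, from);
 *		return netfs_file_write_iter(iocb, from);
 *	}
 *
 * See the 9p ->write_iter() implementation for a real caller.
 */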