author    | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-10 20:51:03 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-10 20:51:03 -0700
commit    | 25cd6f355dab9d11b7c8a4005867d5a30b8b14ee (patch)
tree      | 32bd4285853eae38412714171cfda4c732e2f45b /fs
parent    | 40f06c799539739a08a56be8a096f56aeed05731 (diff)
parent    | 0564336329f0b03a78221ddf51e52af3665e5720 (diff)
Merge tag 'fscrypt-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt
Pull fscrypt updates from Eric Biggers:
- Preparations for supporting encryption on ext4 filesystems where the
  filesystem block size is smaller than PAGE_SIZE (see the usage sketch
  after this list for the resulting per-block API).
- Don't allow setting encryption policies on dead directories.
- Various cleanups.
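The first summary item is the core of this pull: fscrypt previously encrypted and decrypted whole pages only, and this series reworks the content-crypto API to operate on runs of filesystem blocks within a page. As a rough, hedged illustration of the resulting write-path API (not code from this merge; the myfs_encrypt_for_writeback() name and the choice to always encrypt from offset 0 are assumptions), a filesystem's writeback code might do:

```c
/*
 * Illustrative sketch only -- not from this merge.  Encrypt the first
 * @len bytes of a locked pagecache page into a bounce page, rounding
 * up to whole filesystem blocks as the ext4 conversion below does.
 */
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/kernel.h>
#include <linux/mm.h>

static struct page *myfs_encrypt_for_writeback(struct page *page,
                                               unsigned int len)
{
        struct inode *inode = page->mapping->host;
        unsigned int enc_bytes = round_up(len, i_blocksize(inode));

        if (!IS_ENCRYPTED(inode) || !S_ISREG(inode->i_mode))
                return page;    /* submit the pagecache page as-is */

        /*
         * Returns a bounce page holding the ciphertext at the same
         * offsets as the plaintext, or an ERR_PTR() on failure.
         */
        return fscrypt_encrypt_pagecache_blocks(page, enc_bytes, 0,
                                                GFP_NOFS);
}
```

The caller submits the returned bounce page for I/O and releases it afterwards with fscrypt_free_bounce_page(), which is exactly what the ext4_bio_write_page() hunk in this merge does.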
* tag 'fscrypt-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt:
fscrypt: document testing with xfstests
fscrypt: remove selection of CONFIG_CRYPTO_SHA256
fscrypt: remove unnecessary includes of ratelimit.h
fscrypt: don't set policy for a dead directory
ext4: encrypt only up to last block in ext4_bio_write_page()
ext4: decrypt only the needed block in __ext4_block_zero_page_range()
ext4: decrypt only the needed blocks in ext4_block_write_begin()
ext4: clear BH_Uptodate flag on decryption error
fscrypt: decrypt only the needed blocks in __fscrypt_decrypt_bio()
fscrypt: support decrypting multiple filesystem blocks per page
fscrypt: introduce fscrypt_decrypt_block_inplace()
fscrypt: handle blocksize < PAGE_SIZE in fscrypt_zeroout_range()
fscrypt: support encrypting multiple filesystem blocks per page
fscrypt: introduce fscrypt_encrypt_block_inplace()
fscrypt: clean up some BUG_ON()s in block encryption/decryption
fscrypt: rename fscrypt_do_page_crypto() to fscrypt_crypt_block()
fscrypt: remove the "write" part of struct fscrypt_ctx
fscrypt: simplify bounce page handling
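Several commits above ("encrypt only up to last block", "decrypt only the needed block(s)") hinge on mapping a byte offset within a pagecache page to a filesystem logical block number. The new helpers compute it as shown in the diff below; the myfs_lblk_of() wrapper here is purely illustrative:

```c
/*
 * Illustrative sketch of the logical-block-number math used by
 * fscrypt_encrypt_pagecache_blocks() and
 * fscrypt_decrypt_pagecache_blocks() in this merge.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static u64 myfs_lblk_of(struct page *page, unsigned int offs)
{
        const struct inode *inode = page->mapping->host;
        const unsigned int blockbits = inode->i_blkbits;

        /* A page covers 2^(PAGE_SHIFT - blockbits) fs blocks. */
        return ((u64)page->index << (PAGE_SHIFT - blockbits)) +
               (offs >> blockbits);
}
```

For example, with 1K blocks on 4K pages, byte offset 2048 in page index 3 maps to logical block 3 * 4 + 2 = 14, so each 1K block is encrypted with an IV derived from its own logical block number.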
Diffstat (limited to 'fs')
-rw-r--r-- | fs/crypto/Kconfig            |   1
-rw-r--r-- | fs/crypto/bio.c              |  73
-rw-r--r-- | fs/crypto/crypto.c           | 299
-rw-r--r-- | fs/crypto/fname.c            |   1
-rw-r--r-- | fs/crypto/fscrypt_private.h  |  15
-rw-r--r-- | fs/crypto/hooks.c            |   1
-rw-r--r-- | fs/crypto/keyinfo.c          |   1
-rw-r--r-- | fs/crypto/policy.c           |   2
-rw-r--r-- | fs/ext4/inode.c              |  37
-rw-r--r-- | fs/ext4/page-io.c            |  44
-rw-r--r-- | fs/f2fs/data.c               |  17
-rw-r--r-- | fs/ubifs/crypto.c            |  19
12 files changed, 255 insertions, 255 deletions
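Before the raw diff, one more hedged sketch: on the write-completion side, the old fscrypt_pullback_bio_page()/fscrypt_restore_control_page() pair is replaced by the bounce-page helpers used in the ext4_finish_bio() and f2fs_write_end_io() hunks below. The myfs_finish_write_bio() function here is hypothetical and heavily simplified (no buffer_head bookkeeping):

```c
/* Illustrative sketch only; modelled on the ext4_finish_bio() changes. */
#include <linux/bio.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>

static void myfs_finish_write_bio(struct bio *bio)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;
                struct page *bounce_page = NULL;

                /* If this segment is ciphertext, find its pagecache page. */
                if (fscrypt_is_bounce_page(page)) {
                        bounce_page = page;
                        page = fscrypt_pagecache_page(bounce_page);
                }

                if (bio->bi_status)
                        mapping_set_error(page->mapping, -EIO);

                /* Free the ciphertext copy, then finish the real page. */
                fscrypt_free_bounce_page(bounce_page);
                end_page_writeback(page);
        }
}
```

fscrypt_free_bounce_page() accepts NULL, so unencrypted writes need no special-casing here.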
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig index 24ed99e2eca0..5fdf24877c17 100644 --- a/fs/crypto/Kconfig +++ b/fs/crypto/Kconfig @@ -7,7 +7,6 @@ config FS_ENCRYPTION select CRYPTO_ECB select CRYPTO_XTS select CRYPTO_CTS - select CRYPTO_SHA256 select KEYS help Enable encryption of files and directories. This diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index b46021ebde85..82da2510721f 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -33,9 +33,8 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done) bio_for_each_segment_all(bv, bio, iter_all) { struct page *page = bv->bv_page; - int ret = fscrypt_decrypt_page(page->mapping->host, page, - PAGE_SIZE, 0, page->index); - + int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len, + bv->bv_offset); if (ret) SetPageError(page); else if (done) @@ -53,9 +52,8 @@ EXPORT_SYMBOL(fscrypt_decrypt_bio); static void completion_pages(struct work_struct *work) { - struct fscrypt_ctx *ctx = - container_of(work, struct fscrypt_ctx, r.work); - struct bio *bio = ctx->r.bio; + struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work); + struct bio *bio = ctx->bio; __fscrypt_decrypt_bio(bio, true); fscrypt_release_ctx(ctx); @@ -64,57 +62,29 @@ static void completion_pages(struct work_struct *work) void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio) { - INIT_WORK(&ctx->r.work, completion_pages); - ctx->r.bio = bio; - fscrypt_enqueue_decrypt_work(&ctx->r.work); + INIT_WORK(&ctx->work, completion_pages); + ctx->bio = bio; + fscrypt_enqueue_decrypt_work(&ctx->work); } EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio); -void fscrypt_pullback_bio_page(struct page **page, bool restore) -{ - struct fscrypt_ctx *ctx; - struct page *bounce_page; - - /* The bounce data pages are unmapped. */ - if ((*page)->mapping) - return; - - /* The bounce data page is unmapped. 
*/ - bounce_page = *page; - ctx = (struct fscrypt_ctx *)page_private(bounce_page); - - /* restore control page */ - *page = ctx->w.control_page; - - if (restore) - fscrypt_restore_control_page(bounce_page); -} -EXPORT_SYMBOL(fscrypt_pullback_bio_page); - int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, sector_t pblk, unsigned int len) { - struct fscrypt_ctx *ctx; - struct page *ciphertext_page = NULL; + const unsigned int blockbits = inode->i_blkbits; + const unsigned int blocksize = 1 << blockbits; + struct page *ciphertext_page; struct bio *bio; int ret, err = 0; - BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE); - - ctx = fscrypt_get_ctx(GFP_NOFS); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT); - if (IS_ERR(ciphertext_page)) { - err = PTR_ERR(ciphertext_page); - goto errout; - } + ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT); + if (!ciphertext_page) + return -ENOMEM; while (len--) { - err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk, - ZERO_PAGE(0), ciphertext_page, - PAGE_SIZE, 0, GFP_NOFS); + err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk, + ZERO_PAGE(0), ciphertext_page, + blocksize, 0, GFP_NOFS); if (err) goto errout; @@ -124,14 +94,11 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, goto errout; } bio_set_dev(bio, inode->i_sb->s_bdev); - bio->bi_iter.bi_sector = - pblk << (inode->i_sb->s_blocksize_bits - 9); + bio->bi_iter.bi_sector = pblk << (blockbits - 9); bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - ret = bio_add_page(bio, ciphertext_page, - inode->i_sb->s_blocksize, 0); - if (ret != inode->i_sb->s_blocksize) { + ret = bio_add_page(bio, ciphertext_page, blocksize, 0); + if (WARN_ON(ret != blocksize)) { /* should never happen! */ - WARN_ON(1); bio_put(bio); err = -EIO; goto errout; @@ -147,7 +114,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, } err = 0; errout: - fscrypt_release_ctx(ctx); + fscrypt_free_bounce_page(ciphertext_page); return err; } EXPORT_SYMBOL(fscrypt_zeroout_range); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 335a362ee446..45c3d0427fb2 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -59,23 +59,16 @@ void fscrypt_enqueue_decrypt_work(struct work_struct *work) EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work); /** - * fscrypt_release_ctx() - Releases an encryption context - * @ctx: The encryption context to release. + * fscrypt_release_ctx() - Release a decryption context + * @ctx: The decryption context to release. * - * If the encryption context was allocated from the pre-allocated pool, returns - * it to that pool. Else, frees it. - * - * If there's a bounce page in the context, this frees that. + * If the decryption context was allocated from the pre-allocated pool, return + * it to that pool. Else, free it. */ void fscrypt_release_ctx(struct fscrypt_ctx *ctx) { unsigned long flags; - if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) { - mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool); - ctx->w.bounce_page = NULL; - } - ctx->w.control_page = NULL; if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) { kmem_cache_free(fscrypt_ctx_cachep, ctx); } else { @@ -87,12 +80,12 @@ void fscrypt_release_ctx(struct fscrypt_ctx *ctx) EXPORT_SYMBOL(fscrypt_release_ctx); /** - * fscrypt_get_ctx() - Gets an encryption context + * fscrypt_get_ctx() - Get a decryption context * @gfp_flags: The gfp flag for memory allocation * - * Allocates and initializes an encryption context. 
+ * Allocate and initialize a decryption context. * - * Return: A new encryption context on success; an ERR_PTR() otherwise. + * Return: A new decryption context on success; an ERR_PTR() otherwise. */ struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags) { @@ -100,14 +93,8 @@ struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags) unsigned long flags; /* - * We first try getting the ctx from a free list because in - * the common case the ctx will have an allocated and - * initialized crypto tfm, so it's probably a worthwhile - * optimization. For the bounce page, we first try getting it - * from the kernel allocator because that's just about as fast - * as getting it from a list and because a cache of free pages - * should generally be a "last resort" option for a filesystem - * to be able to do its job. + * First try getting a ctx from the free list so that we don't have to + * call into the slab allocator. */ spin_lock_irqsave(&fscrypt_ctx_lock, flags); ctx = list_first_entry_or_null(&fscrypt_free_ctxs, @@ -123,11 +110,31 @@ struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags) } else { ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL; } - ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL; return ctx; } EXPORT_SYMBOL(fscrypt_get_ctx); +struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags) +{ + return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags); +} + +/** + * fscrypt_free_bounce_page() - free a ciphertext bounce page + * + * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(), + * or by fscrypt_alloc_bounce_page() directly. + */ +void fscrypt_free_bounce_page(struct page *bounce_page) +{ + if (!bounce_page) + return; + set_page_private(bounce_page, (unsigned long)NULL); + ClearPagePrivate(bounce_page); + mempool_free(bounce_page, fscrypt_bounce_page_pool); +} +EXPORT_SYMBOL(fscrypt_free_bounce_page); + void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, const struct fscrypt_info *ci) { @@ -141,10 +148,11 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw); } -int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, - u64 lblk_num, struct page *src_page, - struct page *dest_page, unsigned int len, - unsigned int offs, gfp_t gfp_flags) +/* Encrypt or decrypt a single filesystem block of file contents */ +int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, + u64 lblk_num, struct page *src_page, + struct page *dest_page, unsigned int len, + unsigned int offs, gfp_t gfp_flags) { union fscrypt_iv iv; struct skcipher_request *req = NULL; @@ -154,7 +162,10 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, struct crypto_skcipher *tfm = ci->ci_ctfm; int res = 0; - BUG_ON(len == 0); + if (WARN_ON_ONCE(len <= 0)) + return -EINVAL; + if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0)) + return -EINVAL; fscrypt_generate_iv(&iv, lblk_num, ci); @@ -186,126 +197,158 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, return 0; } -struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx, - gfp_t gfp_flags) -{ - ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags); - if (ctx->w.bounce_page == NULL) - return ERR_PTR(-ENOMEM); - ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL; - return ctx->w.bounce_page; -} - /** - * fscypt_encrypt_page() - Encrypts a page - * @inode: The inode for which the encryption should take place - * @page: The page to encrypt. 
Must be locked for bounce-page - * encryption. - * @len: Length of data to encrypt in @page and encrypted - * data in returned page. - * @offs: Offset of data within @page and returned - * page holding encrypted data. - * @lblk_num: Logical block number. This must be unique for multiple - * calls with same inode, except when overwriting - * previously written data. - * @gfp_flags: The gfp flag for memory allocation - * - * Encrypts @page using the ctx encryption context. Performs encryption - * either in-place or into a newly allocated bounce page. - * Called on the page write path. + * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page + * @page: The locked pagecache page containing the block(s) to encrypt + * @len: Total size of the block(s) to encrypt. Must be a nonzero + * multiple of the filesystem's block size. + * @offs: Byte offset within @page of the first block to encrypt. Must be + * a multiple of the filesystem's block size. + * @gfp_flags: Memory allocation flags * - * Bounce page allocation is the default. - * In this case, the contents of @page are encrypted and stored in an - * allocated bounce page. @page has to be locked and the caller must call - * fscrypt_restore_control_page() on the returned ciphertext page to - * release the bounce buffer and the encryption context. + * A new bounce page is allocated, and the specified block(s) are encrypted into + * it. In the bounce page, the ciphertext block(s) will be located at the same + * offsets at which the plaintext block(s) were located in the source page; any + * other parts of the bounce page will be left uninitialized. However, normally + * blocksize == PAGE_SIZE and the whole page is encrypted at once. * - * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in - * fscrypt_operations. Here, the input-page is returned with its content - * encrypted. + * This is for use by the filesystem's ->writepages() method. * - * Return: A page with the encrypted content on success. Else, an - * error value or NULL. + * Return: the new encrypted bounce page on success; an ERR_PTR() on failure */ -struct page *fscrypt_encrypt_page(const struct inode *inode, - struct page *page, - unsigned int len, - unsigned int offs, - u64 lblk_num, gfp_t gfp_flags) +struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, + unsigned int len, + unsigned int offs, + gfp_t gfp_flags) { - struct fscrypt_ctx *ctx; - struct page *ciphertext_page = page; + const struct inode *inode = page->mapping->host; + const unsigned int blockbits = inode->i_blkbits; + const unsigned int blocksize = 1 << blockbits; + struct page *ciphertext_page; + u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) + + (offs >> blockbits); + unsigned int i; int err; - BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0); + if (WARN_ON_ONCE(!PageLocked(page))) + return ERR_PTR(-EINVAL); - if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) { - /* with inplace-encryption we just encrypt the page */ - err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page, - ciphertext_page, len, offs, - gfp_flags); - if (err) - return ERR_PTR(err); + if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize))) + return ERR_PTR(-EINVAL); - return ciphertext_page; - } - - BUG_ON(!PageLocked(page)); - - ctx = fscrypt_get_ctx(gfp_flags); - if (IS_ERR(ctx)) - return ERR_CAST(ctx); - - /* The encryption operation will require a bounce page. 
*/ - ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags); - if (IS_ERR(ciphertext_page)) - goto errout; + ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags); + if (!ciphertext_page) + return ERR_PTR(-ENOMEM); - ctx->w.control_page = page; - err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, - page, ciphertext_page, len, offs, - gfp_flags); - if (err) { - ciphertext_page = ERR_PTR(err); - goto errout; + for (i = offs; i < offs + len; i += blocksize, lblk_num++) { + err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, + page, ciphertext_page, + blocksize, i, gfp_flags); + if (err) { + fscrypt_free_bounce_page(ciphertext_page); + return ERR_PTR(err); + } } SetPagePrivate(ciphertext_page); - set_page_private(ciphertext_page, (unsigned long)ctx); - lock_page(ciphertext_page); + set_page_private(ciphertext_page, (unsigned long)page); return ciphertext_page; +} +EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks); -errout: - fscrypt_release_ctx(ctx); - return ciphertext_page; +/** + * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place + * @inode: The inode to which this block belongs + * @page: The page containing the block to encrypt + * @len: Size of block to encrypt. Doesn't need to be a multiple of the + * fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE. + * @offs: Byte offset within @page at which the block to encrypt begins + * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based + * number of the block within the file + * @gfp_flags: Memory allocation flags + * + * Encrypt a possibly-compressed filesystem block that is located in an + * arbitrary page, not necessarily in the original pagecache page. The @inode + * and @lblk_num must be specified, as they can't be determined from @page. + * + * Return: 0 on success; -errno on failure + */ +int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num, gfp_t gfp_flags) +{ + return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page, + len, offs, gfp_flags); } -EXPORT_SYMBOL(fscrypt_encrypt_page); +EXPORT_SYMBOL(fscrypt_encrypt_block_inplace); /** - * fscrypt_decrypt_page() - Decrypts a page in-place - * @inode: The corresponding inode for the page to decrypt. - * @page: The page to decrypt. Must be locked in case - * it is a writeback page (FS_CFLG_OWN_PAGES unset). - * @len: Number of bytes in @page to be decrypted. - * @offs: Start of data in @page. - * @lblk_num: Logical block number. + * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page + * @page: The locked pagecache page containing the block(s) to decrypt + * @len: Total size of the block(s) to decrypt. Must be a nonzero + * multiple of the filesystem's block size. + * @offs: Byte offset within @page of the first block to decrypt. Must be + * a multiple of the filesystem's block size. * - * Decrypts page in-place using the ctx encryption context. + * The specified block(s) are decrypted in-place within the pagecache page, + * which must still be locked and not uptodate. Normally, blocksize == + * PAGE_SIZE and the whole page is decrypted at once. * - * Called from the read completion callback. + * This is for use by the filesystem's ->readpages() method. * - * Return: Zero on success, non-zero otherwise. 
+ * Return: 0 on success; -errno on failure */ -int fscrypt_decrypt_page(const struct inode *inode, struct page *page, - unsigned int len, unsigned int offs, u64 lblk_num) +int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len, + unsigned int offs) { - if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES)) - BUG_ON(!PageLocked(page)); + const struct inode *inode = page->mapping->host; + const unsigned int blockbits = inode->i_blkbits; + const unsigned int blocksize = 1 << blockbits; + u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) + + (offs >> blockbits); + unsigned int i; + int err; + + if (WARN_ON_ONCE(!PageLocked(page))) + return -EINVAL; + + if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize))) + return -EINVAL; + + for (i = offs; i < offs + len; i += blocksize, lblk_num++) { + err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, + page, blocksize, i, GFP_NOFS); + if (err) + return err; + } + return 0; +} +EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks); - return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page, - len, offs, GFP_NOFS); +/** + * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place + * @inode: The inode to which this block belongs + * @page: The page containing the block to decrypt + * @len: Size of block to decrypt. Doesn't need to be a multiple of the + * fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE. + * @offs: Byte offset within @page at which the block to decrypt begins + * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based + * number of the block within the file + * + * Decrypt a possibly-compressed filesystem block that is located in an + * arbitrary page, not necessarily in the original pagecache page. The @inode + * and @lblk_num must be specified, as they can't be determined from @page. 
+ * + * Return: 0 on success; -errno on failure + */ +int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num) +{ + return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page, + len, offs, GFP_NOFS); } -EXPORT_SYMBOL(fscrypt_decrypt_page); +EXPORT_SYMBOL(fscrypt_decrypt_block_inplace); /* * Validate dentries in encrypted directories to make sure we aren't potentially @@ -355,18 +398,6 @@ const struct dentry_operations fscrypt_d_ops = { .d_revalidate = fscrypt_d_revalidate, }; -void fscrypt_restore_control_page(struct page *page) -{ - struct fscrypt_ctx *ctx; - - ctx = (struct fscrypt_ctx *)page_private(page); - set_page_private(page, (unsigned long)NULL); - ClearPagePrivate(page); - unlock_page(page); - fscrypt_release_ctx(ctx); -} -EXPORT_SYMBOL(fscrypt_restore_control_page); - static void fscrypt_destroy(void) { struct fscrypt_ctx *pos, *n; diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index eccea3d8f923..00d150ff3033 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -12,7 +12,6 @@ */ #include <linux/scatterlist.h> -#include <linux/ratelimit.h> #include <crypto/skcipher.h> #include "fscrypt_private.h" diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 7da276159593..8978eec9d766 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -94,7 +94,6 @@ typedef enum { } fscrypt_direction_t; #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 -#define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002 static inline bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode) @@ -117,14 +116,12 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode, /* crypto.c */ extern struct kmem_cache *fscrypt_info_cachep; extern int fscrypt_initialize(unsigned int cop_flags); -extern int fscrypt_do_page_crypto(const struct inode *inode, - fscrypt_direction_t rw, u64 lblk_num, - struct page *src_page, - struct page *dest_page, - unsigned int len, unsigned int offs, - gfp_t gfp_flags); -extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx, - gfp_t gfp_flags); +extern int fscrypt_crypt_block(const struct inode *inode, + fscrypt_direction_t rw, u64 lblk_num, + struct page *src_page, struct page *dest_page, + unsigned int len, unsigned int offs, + gfp_t gfp_flags); +extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags); extern const struct dentry_operations fscrypt_d_ops; extern void __printf(3, 4) __cold diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c index bd525f7573a4..c1d6715d88e9 100644 --- a/fs/crypto/hooks.c +++ b/fs/crypto/hooks.c @@ -5,7 +5,6 @@ * Encryption hooks for higher-level filesystem operations. 
*/ -#include <linux/ratelimit.h> #include "fscrypt_private.h" /** diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index dcd91a3fbe49..207ebed918c1 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -12,7 +12,6 @@ #include <keys/user-type.h> #include <linux/hashtable.h> #include <linux/scatterlist.h> -#include <linux/ratelimit.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/sha.h> diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index d536889ac31b..4941fe8471ce 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -81,6 +81,8 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) if (ret == -ENODATA) { if (!S_ISDIR(inode->i_mode)) ret = -ENOTDIR; + else if (IS_DEADDIR(inode)) + ret = -ENOENT; else if (!inode->i_sb->s_cop->empty_dir(inode)) ret = -ENOTEMPTY; else diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c7f77c643008..f65357735a1a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1164,8 +1164,9 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, int err = 0; unsigned blocksize = inode->i_sb->s_blocksize; unsigned bbits; - struct buffer_head *bh, *head, *wait[2], **wait_bh = wait; - bool decrypt = false; + struct buffer_head *bh, *head, *wait[2]; + int nr_wait = 0; + int i; BUG_ON(!PageLocked(page)); BUG_ON(from > PAGE_SIZE); @@ -1217,23 +1218,32 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, !buffer_unwritten(bh) && (block_start < from || block_end > to)) { ll_rw_block(REQ_OP_READ, 0, 1, &bh); - *wait_bh++ = bh; - decrypt = IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); + wait[nr_wait++] = bh; } } /* * If we issued read requests, let them complete. */ - while (wait_bh > wait) { - wait_on_buffer(*--wait_bh); - if (!buffer_uptodate(*wait_bh)) + for (i = 0; i < nr_wait; i++) { + wait_on_buffer(wait[i]); + if (!buffer_uptodate(wait[i])) err = -EIO; } - if (unlikely(err)) + if (unlikely(err)) { page_zero_new_buffers(page, from, to); - else if (decrypt) - err = fscrypt_decrypt_page(page->mapping->host, page, - PAGE_SIZE, 0, page->index); + } else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) { + for (i = 0; i < nr_wait; i++) { + int err2; + + err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize, + bh_offset(wait[i])); + if (err2) { + clear_buffer_uptodate(wait[i]); + err = err2; + } + } + } + return err; } #endif @@ -4065,9 +4075,8 @@ static int __ext4_block_zero_page_range(handle_t *handle, if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) { /* We expect the key to be set. 
*/ BUG_ON(!fscrypt_has_encryption_key(inode)); - BUG_ON(blocksize != PAGE_SIZE); - WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host, - page, PAGE_SIZE, 0, page->index)); + WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks( + page, blocksize, bh_offset(bh))); } } if (ext4_should_journal_data(inode)) { diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 4690618a92e9..a18a47a2a1d1 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -66,9 +66,7 @@ static void ext4_finish_bio(struct bio *bio) bio_for_each_segment_all(bvec, bio, iter_all) { struct page *page = bvec->bv_page; -#ifdef CONFIG_FS_ENCRYPTION - struct page *data_page = NULL; -#endif + struct page *bounce_page = NULL; struct buffer_head *bh, *head; unsigned bio_start = bvec->bv_offset; unsigned bio_end = bio_start + bvec->bv_len; @@ -78,13 +76,10 @@ static void ext4_finish_bio(struct bio *bio) if (!page) continue; -#ifdef CONFIG_FS_ENCRYPTION - if (!page->mapping) { - /* The bounce data pages are unmapped. */ - data_page = page; - fscrypt_pullback_bio_page(&page, false); + if (fscrypt_is_bounce_page(page)) { + bounce_page = page; + page = fscrypt_pagecache_page(bounce_page); } -#endif if (bio->bi_status) { SetPageError(page); @@ -111,10 +106,7 @@ static void ext4_finish_bio(struct bio *bio) bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); local_irq_restore(flags); if (!under_io) { -#ifdef CONFIG_FS_ENCRYPTION - if (data_page) - fscrypt_restore_control_page(data_page); -#endif + fscrypt_free_bounce_page(bounce_page); end_page_writeback(page); } } @@ -415,7 +407,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, struct writeback_control *wbc, bool keep_towrite) { - struct page *data_page = NULL; + struct page *bounce_page = NULL; struct inode *inode = page->mapping->host; unsigned block_start; struct buffer_head *bh, *head; @@ -475,14 +467,22 @@ int ext4_bio_write_page(struct ext4_io_submit *io, bh = head = page_buffers(page); + /* + * If any blocks are being written to an encrypted file, encrypt them + * into a bounce page. For simplicity, just encrypt until the last + * block which might be needed. This may cause some unneeded blocks + * (e.g. holes) to be unnecessarily encrypted, but this is rare and + * can't happen in the common case of blocksize == PAGE_SIZE. + */ if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) { gfp_t gfp_flags = GFP_NOFS; + unsigned int enc_bytes = round_up(len, i_blocksize(inode)); retry_encrypt: - data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0, - page->index, gfp_flags); - if (IS_ERR(data_page)) { - ret = PTR_ERR(data_page); + bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes, + 0, gfp_flags); + if (IS_ERR(bounce_page)) { + ret = PTR_ERR(bounce_page); if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) { if (io->io_bio) { ext4_io_submit(io); @@ -491,7 +491,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, gfp_flags |= __GFP_NOFAIL; goto retry_encrypt; } - data_page = NULL; + bounce_page = NULL; goto out; } } @@ -500,8 +500,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, do { if (!buffer_async_write(bh)) continue; - ret = io_submit_add_bh(io, inode, - data_page ? data_page : page, bh); + ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh); if (ret) { /* * We only get here on ENOMEM. Not much else @@ -517,8 +516,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, /* Error stopped previous loop? Clean up buffers... 
*/ if (ret) { out: - if (data_page) - fscrypt_restore_control_page(data_page); + fscrypt_free_bounce_page(bounce_page); printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret); redirty_page_for_writepage(wbc, page); do { diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index eda4181d2092..a546ac8685ea 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -185,7 +185,7 @@ static void f2fs_write_end_io(struct bio *bio) continue; } - fscrypt_pullback_bio_page(&page, true); + fscrypt_finalize_bounce_page(&page); if (unlikely(bio->bi_status)) { mapping_set_error(page->mapping, -EIO); @@ -362,10 +362,9 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, bio_for_each_segment_all(bvec, io->bio, iter_all) { - if (bvec->bv_page->mapping) - target = bvec->bv_page; - else - target = fscrypt_control_page(bvec->bv_page); + target = bvec->bv_page; + if (fscrypt_is_bounce_page(target)) + target = fscrypt_pagecache_page(target); if (inode && inode == target->mapping->host) return true; @@ -1727,8 +1726,9 @@ static int encrypt_one_page(struct f2fs_io_info *fio) f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); retry_encrypt: - fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page, - PAGE_SIZE, 0, fio->page->index, gfp_flags); + fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page, + PAGE_SIZE, 0, + gfp_flags); if (IS_ERR(fio->encrypted_page)) { /* flush pending IOs and wait for a while in the ENOMEM case */ if (PTR_ERR(fio->encrypted_page) == -ENOMEM) { @@ -1900,8 +1900,7 @@ got_it: err = f2fs_inplace_write_data(fio); if (err) { if (f2fs_encrypted_file(inode)) - fscrypt_pullback_bio_page(&fio->encrypted_page, - true); + fscrypt_finalize_bounce_page(&fio->encrypted_page); if (PageWriteback(page)) end_page_writeback(page); } else { diff --git a/fs/ubifs/crypto.c b/fs/ubifs/crypto.c index 4aaedf2d7f44..22be7aeb96c4 100644 --- a/fs/ubifs/crypto.c +++ b/fs/ubifs/crypto.c @@ -29,8 +29,8 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn, { struct ubifs_info *c = inode->i_sb->s_fs_info; void *p = &dn->data; - struct page *ret; unsigned int pad_len = round_up(in_len, UBIFS_CIPHER_BLOCK_SIZE); + int err; ubifs_assert(c, pad_len <= *out_len); dn->compr_size = cpu_to_le16(in_len); @@ -39,11 +39,11 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn, if (pad_len != in_len) memset(p + in_len, 0, pad_len - in_len); - ret = fscrypt_encrypt_page(inode, virt_to_page(&dn->data), pad_len, - offset_in_page(&dn->data), block, GFP_NOFS); - if (IS_ERR(ret)) { - ubifs_err(c, "fscrypt_encrypt_page failed: %ld", PTR_ERR(ret)); - return PTR_ERR(ret); + err = fscrypt_encrypt_block_inplace(inode, virt_to_page(p), pad_len, + offset_in_page(p), block, GFP_NOFS); + if (err) { + ubifs_err(c, "fscrypt_encrypt_block_inplace() failed: %d", err); + return err; } *out_len = pad_len; @@ -64,10 +64,11 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn, } ubifs_assert(c, dlen <= UBIFS_BLOCK_SIZE); - err = fscrypt_decrypt_page(inode, virt_to_page(&dn->data), dlen, - offset_in_page(&dn->data), block); + err = fscrypt_decrypt_block_inplace(inode, virt_to_page(&dn->data), + dlen, offset_in_page(&dn->data), + block); if (err) { - ubifs_err(c, "fscrypt_decrypt_page failed: %i", err); + ubifs_err(c, "fscrypt_decrypt_block_inplace() failed: %d", err); return err; } *out_len = clen; |
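Finally, data that never lives in the pagecache (ubifs encrypts kmalloc'ed data nodes) uses the new in-place helpers, which take the inode and logical block number explicitly because they cannot be derived from the page. A minimal sketch, assuming a hypothetical myfs_crypt_node() helper and a @len that is a multiple of FS_CRYPTO_BLOCK_SIZE:

```c
/* Illustrative sketch only; modelled on the ubifs conversion above. */
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/mm.h>

static int myfs_crypt_node(const struct inode *inode, void *data,
                           unsigned int len, u64 lblk, bool encrypt)
{
        struct page *page = virt_to_page(data);      /* lowmem buffer */
        unsigned int offs = offset_in_page(data);

        if (encrypt)
                return fscrypt_encrypt_block_inplace(inode, page, len,
                                                     offs, lblk, GFP_NOFS);
        return fscrypt_decrypt_block_inplace(inode, page, len, offs, lblk);
}
```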