author     Linus Torvalds <torvalds@linux-foundation.org>	2023-11-02 08:19:51 -1000
committer  Linus Torvalds <torvalds@linux-foundation.org>	2023-11-02 08:19:51 -1000
commit     5efad0a7658cea6dfd22d4e75d247692e48dde10
tree       5731c33b2507b187c407a0d4ddb9641e55a4c020 /fs/ext2/dir.c
parent     e9806ff8a0f9e6eb193326383a7b88bf30ad0533
parent     82dd620653b322a908db2d4741bbf64c12cbdb54
Merge tag 'fs_for_v6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs
Pull ext2, udf, and quota updates from Jan Kara:
- conversion of ext2 directory code to use folios
- cleanups in UDF declarations
- bugfix for quota interaction with file encryption
* tag 'fs_for_v6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
ext2: Convert ext2_prepare_chunk and ext2_commit_chunk to folios
ext2: Convert ext2_make_empty() to use a folio
ext2: Convert ext2_unlink() and ext2_rename() to use folios
ext2: Convert ext2_delete_entry() to use folios
ext2: Convert ext2_empty_dir() to use a folio
ext2: Convert ext2_add_link() to use a folio
ext2: Convert ext2_readdir to use a folio
ext2: Add ext2_get_folio()
ext2: Convert ext2_check_page to ext2_check_folio
highmem: Add folio_release_kmap()
udf: Avoid unneeded variable length array in struct fileIdentDesc
udf: Annotate struct udf_bitmap with __counted_by
quota: explicitly forbid quota files from being encrypted
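
Editor's note: the folio_release_kmap() helper referenced in the shortlog is what lets the converted ext2 callers in the diff below drop the temporary kernel mapping and the folio reference in one call. A minimal sketch of that helper, assuming (as the ext2 conversion relies on) that it simply pairs kunmap_local() with folio_put(), mirroring the ext2_put_page() calls it replaces:

/*
 * Sketch of the helper added by "highmem: Add folio_release_kmap()".
 * Assumption: it undoes a kmap_local_folio() mapping and drops the
 * folio reference taken when the folio was looked up.
 */
static inline void folio_release_kmap(struct folio *folio, void *addr)
{
	kunmap_local(addr);	/* tear down the local kernel mapping */
	folio_put(folio);	/* release the folio reference */
}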
Diffstat (limited to 'fs/ext2/dir.c')
-rw-r--r--  fs/ext2/dir.c  214
1 file changed, 107 insertions(+), 107 deletions(-)
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index c7900868171b..4fb155b5a958 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -81,34 +81,34 @@ ext2_last_byte(struct inode *inode, unsigned long page_nr)
 	return last_byte;
 }
 
-static void ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
+static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
 {
-	struct address_space *mapping = page->mapping;
+	struct address_space *mapping = folio->mapping;
 	struct inode *dir = mapping->host;
 
 	inode_inc_iversion(dir);
-	block_write_end(NULL, mapping, pos, len, len, page, NULL);
+	block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL);
 
 	if (pos+len > dir->i_size) {
 		i_size_write(dir, pos+len);
 		mark_inode_dirty(dir);
 	}
-	unlock_page(page);
+	folio_unlock(folio);
 }
 
-static bool ext2_check_page(struct page *page, int quiet, char *kaddr)
+static bool ext2_check_folio(struct folio *folio, int quiet, char *kaddr)
 {
-	struct inode *dir = page->mapping->host;
+	struct inode *dir = folio->mapping->host;
 	struct super_block *sb = dir->i_sb;
 	unsigned chunk_size = ext2_chunk_size(dir);
 	u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_SIZE;
+	unsigned limit = folio_size(folio);
 	ext2_dirent *p;
 	char *error;
 
-	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_MASK;
+	if (dir->i_size < folio_pos(folio) + limit) {
+		limit = offset_in_folio(folio, dir->i_size);
 		if (limit & (chunk_size - 1))
 			goto Ebadsize;
 		if (!limit)
@@ -132,7 +132,7 @@ static bool ext2_check_page(struct page *page, int quiet, char *kaddr)
 	if (offs != limit)
 		goto Eend;
 out:
-	SetPageChecked(page);
+	folio_set_checked(folio);
 	return true;
 
 	/* Too bad, we had an error */
@@ -160,51 +160,52 @@ Einumber:
 bad_entry:
 	if (!quiet)
 		ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - "
-			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-			dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
+			"offset=%llu, inode=%lu, rec_len=%d, name_len=%d",
+			dir->i_ino, error, folio_pos(folio) + offs,
 			(unsigned long) le32_to_cpu(p->inode),
 			rec_len, p->name_len);
 	goto fail;
 Eend:
 	if (!quiet) {
 		p = (ext2_dirent *)(kaddr + offs);
-		ext2_error(sb, "ext2_check_page",
+		ext2_error(sb, "ext2_check_folio",
 			"entry in directory #%lu spans the page boundary"
-			"offset=%lu, inode=%lu",
-			dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
+			"offset=%llu, inode=%lu",
+			dir->i_ino, folio_pos(folio) + offs,
 			(unsigned long) le32_to_cpu(p->inode));
 	}
 fail:
-	SetPageError(page);
+	folio_set_error(folio);
 	return false;
 }
 
 /*
- * Calls to ext2_get_page()/ext2_put_page() must be nested according to the
- * rules documented in kmap_local_page()/kunmap_local().
+ * Calls to ext2_get_folio()/folio_release_kmap() must be nested according
+ * to the rules documented in kmap_local_folio()/kunmap_local().
  *
- * NOTE: ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page()
- * and should be treated as a call to ext2_get_page() for nesting purposes.
+ * NOTE: ext2_find_entry() and ext2_dotdot() act as a call
+ * to folio_release_kmap() and should be treated as a call to
+ * folio_release_kmap() for nesting purposes.
  */
-static void *ext2_get_page(struct inode *dir, unsigned long n,
-				   int quiet, struct page **page)
+static void *ext2_get_folio(struct inode *dir, unsigned long n,
+				   int quiet, struct folio **foliop)
 {
 	struct address_space *mapping = dir->i_mapping;
 	struct folio *folio = read_mapping_folio(mapping, n, NULL);
-	void *page_addr;
+	void *kaddr;
 
 	if (IS_ERR(folio))
 		return ERR_CAST(folio);
-	page_addr = kmap_local_folio(folio, n & (folio_nr_pages(folio) - 1));
+	kaddr = kmap_local_folio(folio, 0);
 	if (unlikely(!folio_test_checked(folio))) {
-		if (!ext2_check_page(&folio->page, quiet, page_addr))
+		if (!ext2_check_folio(folio, quiet, kaddr))
 			goto fail;
 	}
-	*page = &folio->page;
-	return page_addr;
+	*foliop = folio;
+	return kaddr;
 
 fail:
-	ext2_put_page(&folio->page, page_addr);
+	folio_release_kmap(folio, kaddr);
 	return ERR_PTR(-EIO);
 }
 
@@ -274,8 +275,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
 
 	for ( ; n < npages; n++, offset = 0) {
 		ext2_dirent *de;
-		struct page *page;
-		char *kaddr = ext2_get_page(inode, n, 0, &page);
+		struct folio *folio;
+		char *kaddr = ext2_get_folio(inode, n, 0, &folio);
 		char *limit;
 
 		if (IS_ERR(kaddr)) {
@@ -299,7 +300,7 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
 			if (de->rec_len == 0) {
 				ext2_error(sb, __func__,
 					"zero-length directory entry");
-				ext2_put_page(page, de);
+				folio_release_kmap(folio, de);
 				return -EIO;
 			}
 			if (de->inode) {
@@ -311,13 +312,13 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
 				if (!dir_emit(ctx, de->name, de->name_len,
 						le32_to_cpu(de->inode),
 						d_type)) {
-					ext2_put_page(page, de);
+					folio_release_kmap(folio, de);
 					return 0;
 				}
 			}
 			ctx->pos += ext2_rec_len_from_disk(de->rec_len);
 		}
-		ext2_put_page(page, kaddr);
+		folio_release_kmap(folio, kaddr);
 	}
 	return 0;
 }
@@ -330,38 +331,35 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
  * and the entry itself. Page is returned mapped and unlocked.
  * Entry is guaranteed to be valid.
  *
- * On Success ext2_put_page() should be called on *res_page.
+ * On Success folio_release_kmap() should be called on *foliop.
  *
- * NOTE: Calls to ext2_get_page()/ext2_put_page() must be nested according to
- * the rules documented in kmap_local_page()/kunmap_local().
+ * NOTE: Calls to ext2_get_folio()/folio_release_kmap() must be nested
+ * according to the rules documented in kmap_local_folio()/kunmap_local().
  *
- * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page() and
- * should be treated as a call to ext2_get_page() for nesting purposes.
+ * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_folio()
+ * and should be treated as a call to ext2_get_folio() for nesting
+ * purposes.
  */
 struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir,
-			const struct qstr *child, struct page **res_page)
+			const struct qstr *child, struct folio **foliop)
 {
 	const char *name = child->name;
 	int namelen = child->len;
 	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
 	unsigned long start, n;
 	unsigned long npages = dir_pages(dir);
-	struct page *page = NULL;
 	struct ext2_inode_info *ei = EXT2_I(dir);
 	ext2_dirent * de;
 
 	if (npages == 0)
 		goto out;
 
-	/* OFFSET_CACHE */
-	*res_page = NULL;
-
 	start = ei->i_dir_start_lookup;
 	if (start >= npages)
 		start = 0;
 	n = start;
 	do {
-		char *kaddr = ext2_get_page(dir, n, 0, &page);
+		char *kaddr = ext2_get_folio(dir, n, 0, foliop);
 		if (IS_ERR(kaddr))
 			return ERR_CAST(kaddr);
@@ -371,18 +369,18 @@ struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir,
 			if (de->rec_len == 0) {
 				ext2_error(dir->i_sb, __func__,
 					"zero-length directory entry");
-				ext2_put_page(page, de);
+				folio_release_kmap(*foliop, de);
 				goto out;
 			}
 			if (ext2_match(namelen, name, de))
 				goto found;
 			de = ext2_next_entry(de);
 		}
-		ext2_put_page(page, kaddr);
+		folio_release_kmap(*foliop, kaddr);
 		if (++n >= npages)
 			n = 0;
-		/* next page is past the blocks we've got */
+		/* next folio is past the blocks we've got */
 		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
 			ext2_error(dir->i_sb, __func__,
 				"dir %lu size %lld exceeds block count %llu",
@@ -395,7 +393,6 @@ out:
 	return ERR_PTR(-ENOENT);
 
 found:
-	*res_page = page;
 	ei->i_dir_start_lookup = n;
 	return de;
 }
@@ -404,17 +401,18 @@ found:
  * Return the '..' directory entry and the page in which the entry was found
  * (as a parameter - p).
  *
- * On Success ext2_put_page() should be called on *p.
+ * On Success folio_release_kmap() should be called on *foliop.
  *
- * NOTE: Calls to ext2_get_page()/ext2_put_page() must be nested according to
- * the rules documented in kmap_local_page()/kunmap_local().
+ * NOTE: Calls to ext2_get_folio()/folio_release_kmap() must be nested
+ * according to the rules documented in kmap_local_folio()/kunmap_local().
  *
- * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_page() and
- * should be treated as a call to ext2_get_page() for nesting purposes.
+ * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_folio()
+ * and should be treated as a call to ext2_get_folio() for nesting
+ * purposes.
  */
-struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p)
+struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct folio **foliop)
 {
-	ext2_dirent *de = ext2_get_page(dir, 0, 0, p);
+	ext2_dirent *de = ext2_get_folio(dir, 0, 0, foliop);
 
 	if (!IS_ERR(de))
 		return ext2_next_entry(de);
@@ -424,23 +422,22 @@ int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino)
 {
 	struct ext2_dir_entry_2 *de;
-	struct page *page;
-
-	de = ext2_find_entry(dir, child, &page);
+	struct folio *folio;
+
+	de = ext2_find_entry(dir, child, &folio);
 	if (IS_ERR(de))
 		return PTR_ERR(de);
 
 	*ino = le32_to_cpu(de->inode);
-	ext2_put_page(page, de);
+	folio_release_kmap(folio, de);
 	return 0;
 }
 
-static int ext2_prepare_chunk(struct page *page, loff_t pos, unsigned len)
+static int ext2_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
 {
-	return __block_write_begin(page, pos, len, ext2_get_block);
+	return __block_write_begin(&folio->page, pos, len, ext2_get_block);
 }
-
 static int ext2_handle_dirsync(struct inode *dir)
 {
 	int err;
@@ -452,21 +449,21 @@ static int ext2_handle_dirsync(struct inode *dir)
 }
 
 int ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
-		struct page *page, struct inode *inode, bool update_times)
+		struct folio *folio, struct inode *inode, bool update_times)
 {
-	loff_t pos = page_offset(page) + offset_in_page(de);
+	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
 	unsigned len = ext2_rec_len_from_disk(de->rec_len);
 	int err;
 
-	lock_page(page);
-	err = ext2_prepare_chunk(page, pos, len);
+	folio_lock(folio);
+	err = ext2_prepare_chunk(folio, pos, len);
 	if (err) {
-		unlock_page(page);
+		folio_unlock(folio);
 		return err;
 	}
 	de->inode = cpu_to_le32(inode->i_ino);
 	ext2_set_de_type(de, inode);
-	ext2_commit_chunk(page, pos, len);
+	ext2_commit_chunk(folio, pos, len);
 	if (update_times)
 		inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
 	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
@@ -485,7 +482,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
 	unsigned chunk_size = ext2_chunk_size(dir);
 	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
 	unsigned short rec_len, name_len;
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	ext2_dirent * de;
 	unsigned long npages = dir_pages(dir);
 	unsigned long n;
@@ -494,19 +491,19 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
 
 	/*
 	 * We take care of directory expansion in the same loop.
-	 * This code plays outside i_size, so it locks the page
+	 * This code plays outside i_size, so it locks the folio
 	 * to protect that region.
 	 */
 	for (n = 0; n <= npages; n++) {
-		char *kaddr = ext2_get_page(dir, n, 0, &page);
+		char *kaddr = ext2_get_folio(dir, n, 0, &folio);
 		char *dir_end;
 
 		if (IS_ERR(kaddr))
 			return PTR_ERR(kaddr);
-		lock_page(page);
+		folio_lock(folio);
 		dir_end = kaddr + ext2_last_byte(dir, n);
 		de = (ext2_dirent *)kaddr;
-		kaddr += PAGE_SIZE - reclen;
+		kaddr += folio_size(folio) - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				/* We hit i_size */
@@ -533,15 +530,15 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
 				goto got_it;
 			de = (ext2_dirent *) ((char *) de + rec_len);
 		}
-		unlock_page(page);
-		ext2_put_page(page, kaddr);
+		folio_unlock(folio);
+		folio_release_kmap(folio, kaddr);
 	}
 	BUG();
 	return -EINVAL;
 
 got_it:
-	pos = page_offset(page) + offset_in_page(de);
-	err = ext2_prepare_chunk(page, pos, rec_len);
+	pos = folio_pos(folio) + offset_in_folio(folio, de);
+	err = ext2_prepare_chunk(folio, pos, rec_len);
 	if (err)
 		goto out_unlock;
 	if (de->inode) {
@@ -554,17 +551,17 @@ got_it:
 	memcpy(de->name, name, namelen);
 	de->inode = cpu_to_le32(inode->i_ino);
 	ext2_set_de_type (de, inode);
-	ext2_commit_chunk(page, pos, rec_len);
+	ext2_commit_chunk(folio, pos, rec_len);
 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
 	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
 	mark_inode_dirty(dir);
 	err = ext2_handle_dirsync(dir);
 	/* OFFSET_CACHE */
 out_put:
-	ext2_put_page(page, de);
+	folio_release_kmap(folio, de);
 	return err;
 out_unlock:
-	unlock_page(page);
+	folio_unlock(folio);
 	goto out_put;
 }
@@ -572,18 +569,21 @@ out_unlock:
  * ext2_delete_entry deletes a directory entry by merging it with the
  * previous entry. Page is up-to-date.
  */
-int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page)
+int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct folio *folio)
 {
-	struct inode *inode = page->mapping->host;
-	char *kaddr = (char *)((unsigned long)dir & PAGE_MASK);
-	unsigned from = offset_in_page(dir) & ~(ext2_chunk_size(inode)-1);
-	unsigned to = offset_in_page(dir) +
-				ext2_rec_len_from_disk(dir->rec_len);
+	struct inode *inode = folio->mapping->host;
+	size_t from, to;
+	char *kaddr;
 	loff_t pos;
-	ext2_dirent *pde = NULL;
-	ext2_dirent *de = (ext2_dirent *)(kaddr + from);
+	ext2_dirent *de, *pde = NULL;
 	int err;
 
+	from = offset_in_folio(folio, dir);
+	to = from + ext2_rec_len_from_disk(dir->rec_len);
+	kaddr = (char *)dir - from;
+	from &= ~(ext2_chunk_size(inode)-1);
+	de = (ext2_dirent *)(kaddr + from);
+
 	while ((char*)de < (char*)dir) {
 		if (de->rec_len == 0) {
 			ext2_error(inode->i_sb, __func__,
@@ -594,18 +594,18 @@ int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page)
 		de = ext2_next_entry(de);
 	}
 	if (pde)
-		from = offset_in_page(pde);
-	pos = page_offset(page) + from;
-	lock_page(page);
-	err = ext2_prepare_chunk(page, pos, to - from);
+		from = offset_in_folio(folio, pde);
+	pos = folio_pos(folio) + from;
+	folio_lock(folio);
+	err = ext2_prepare_chunk(folio, pos, to - from);
 	if (err) {
-		unlock_page(page);
+		folio_unlock(folio);
 		return err;
 	}
 	if (pde)
 		pde->rec_len = ext2_rec_len_to_disk(to - from);
 	dir->inode = 0;
-	ext2_commit_chunk(page, pos, to - from);
+	ext2_commit_chunk(folio, pos, to - from);
 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 	EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
 	mark_inode_dirty(inode);
@@ -617,21 +617,21 @@ int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page)
  */
 int ext2_make_empty(struct inode *inode, struct inode *parent)
 {
-	struct page *page = grab_cache_page(inode->i_mapping, 0);
+	struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
 	unsigned chunk_size = ext2_chunk_size(inode);
 	struct ext2_dir_entry_2 * de;
 	int err;
 	void *kaddr;
 
-	if (!page)
-		return -ENOMEM;
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);
 
-	err = ext2_prepare_chunk(page, 0, chunk_size);
+	err = ext2_prepare_chunk(folio, 0, chunk_size);
 	if (err) {
-		unlock_page(page);
+		folio_unlock(folio);
 		goto fail;
 	}
-	kaddr = kmap_local_page(page);
+	kaddr = kmap_local_folio(folio, 0);
 	memset(kaddr, 0, chunk_size);
 	de = (struct ext2_dir_entry_2 *)kaddr;
 	de->name_len = 1;
@@ -647,26 +647,26 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
 	memcpy (de->name, "..\0", 4);
 	ext2_set_de_type (de, inode);
 	kunmap_local(kaddr);
-	ext2_commit_chunk(page, 0, chunk_size);
+	ext2_commit_chunk(folio, 0, chunk_size);
 	err = ext2_handle_dirsync(inode);
 fail:
-	put_page(page);
+	folio_put(folio);
 	return err;
 }
 
 /*
  * routine to check that the specified directory is empty (for rmdir)
  */
-int ext2_empty_dir (struct inode * inode)
+int ext2_empty_dir(struct inode *inode)
 {
-	struct page *page;
+	struct folio *folio;
 	char *kaddr;
 	unsigned long i, npages = dir_pages(inode);
 
 	for (i = 0; i < npages; i++) {
 		ext2_dirent *de;
 
-		kaddr = ext2_get_page(inode, i, 0, &page);
+		kaddr = ext2_get_folio(inode, i, 0, &folio);
 		if (IS_ERR(kaddr))
 			return 0;
@@ -695,12 +695,12 @@ int ext2_empty_dir (struct inode * inode)
 			}
 			de = ext2_next_entry(de);
 		}
-		ext2_put_page(page, kaddr);
+		folio_release_kmap(folio, kaddr);
 	}
 	return 1;
 
 not_empty:
-	ext2_put_page(page, kaddr);
+	folio_release_kmap(folio, kaddr);
 	return 0;
 }
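
Editor's note: the calling convention after this conversion is that a directory walker obtains a kmapped folio from ext2_get_folio(), iterates the entries through the returned address, and drops both the mapping and the folio reference with folio_release_kmap(). A condensed, hypothetical walker (ext2_walk_dir_folio() is not a function from this patch; error reporting trimmed) illustrating the pattern followed by the converted ext2_readdir() and ext2_find_entry():

/*
 * Hypothetical example only: a minimal walk over one directory folio,
 * following the ext2_get_folio()/folio_release_kmap() pattern used by
 * the converted functions above.
 */
static int ext2_walk_dir_folio(struct inode *dir, unsigned long n)
{
	struct folio *folio;
	char *kaddr = ext2_get_folio(dir, n, 0, &folio);
	ext2_dirent *de;

	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);

	de = (ext2_dirent *)kaddr;
	while ((char *)de < kaddr + ext2_last_byte(dir, n)) {
		if (de->rec_len == 0) {
			/* de points into the same mapping as kaddr */
			folio_release_kmap(folio, de);
			return -EIO;
		}
		/* ... examine de->inode, de->name, etc. ... */
		de = ext2_next_entry(de);
	}
	folio_release_kmap(folio, kaddr);
	return 0;
}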