author		Al Viro <viro@zeniv.linux.org.uk>	2016-05-11 00:00:29 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2016-05-11 00:00:29 -0400
commit		e4d35be584be88a3db3fa5635a97c62a2ec5aafe (patch)
tree		fc22a7fb65697306edd71411959ccee6df60c64d /fs/hfsplus/bnode.c
parent		99d825822eade8d827a1817357cbf3f889a552d6 (diff)
parent		38b78a5f18584db6fa7441e0f4531b283b0e6725 (diff)
Merge branch 'ovl-fixes' into for-linus
Diffstat (limited to 'fs/hfsplus/bnode.c')
-rw-r--r--	fs/hfsplus/bnode.c	90
1 file changed, 45 insertions(+), 45 deletions(-)
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 63924662aaf3..ce014ceb89ef 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -24,16 +24,16 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
int l;
off += node->page_offset;
- pagep = node->page + (off >> PAGE_CACHE_SHIFT);
- off &= ~PAGE_CACHE_MASK;
+ pagep = node->page + (off >> PAGE_SHIFT);
+ off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_CACHE_SIZE - off);
+ l = min_t(int, len, PAGE_SIZE - off);
memcpy(buf, kmap(*pagep) + off, l);
kunmap(*pagep);
while ((len -= l) != 0) {
buf += l;
- l = min_t(int, len, PAGE_CACHE_SIZE);
+ l = min_t(int, len, PAGE_SIZE);
memcpy(buf, kmap(*++pagep), l);
kunmap(*pagep);
}
@@ -77,17 +77,17 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
int l;
off += node->page_offset;
- pagep = node->page + (off >> PAGE_CACHE_SHIFT);
- off &= ~PAGE_CACHE_MASK;
+ pagep = node->page + (off >> PAGE_SHIFT);
+ off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_CACHE_SIZE - off);
+ l = min_t(int, len, PAGE_SIZE - off);
memcpy(kmap(*pagep) + off, buf, l);
set_page_dirty(*pagep);
kunmap(*pagep);
while ((len -= l) != 0) {
buf += l;
- l = min_t(int, len, PAGE_CACHE_SIZE);
+ l = min_t(int, len, PAGE_SIZE);
memcpy(kmap(*++pagep), buf, l);
set_page_dirty(*pagep);
kunmap(*pagep);
@@ -107,16 +107,16 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
int l;
off += node->page_offset;
- pagep = node->page + (off >> PAGE_CACHE_SHIFT);
- off &= ~PAGE_CACHE_MASK;
+ pagep = node->page + (off >> PAGE_SHIFT);
+ off &= ~PAGE_MASK;
- l = min_t(int, len, PAGE_CACHE_SIZE - off);
+ l = min_t(int, len, PAGE_SIZE - off);
memset(kmap(*pagep) + off, 0, l);
set_page_dirty(*pagep);
kunmap(*pagep);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_CACHE_SIZE);
+ l = min_t(int, len, PAGE_SIZE);
memset(kmap(*++pagep), 0, l);
set_page_dirty(*pagep);
kunmap(*pagep);
@@ -136,20 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
tree = src_node->tree;
src += src_node->page_offset;
dst += dst_node->page_offset;
- src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
- src &= ~PAGE_CACHE_MASK;
- dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
- dst &= ~PAGE_CACHE_MASK;
+ src_page = src_node->page + (src >> PAGE_SHIFT);
+ src &= ~PAGE_MASK;
+ dst_page = dst_node->page + (dst >> PAGE_SHIFT);
+ dst &= ~PAGE_MASK;
if (src == dst) {
- l = min_t(int, len, PAGE_CACHE_SIZE - src);
+ l = min_t(int, len, PAGE_SIZE - src);
memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
kunmap(*src_page);
set_page_dirty(*dst_page);
kunmap(*dst_page);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_CACHE_SIZE);
+ l = min_t(int, len, PAGE_SIZE);
memcpy(kmap(*++dst_page), kmap(*++src_page), l);
kunmap(*src_page);
set_page_dirty(*dst_page);
@@ -161,12 +161,12 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
do {
src_ptr = kmap(*src_page) + src;
dst_ptr = kmap(*dst_page) + dst;
- if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
- l = PAGE_CACHE_SIZE - src;
+ if (PAGE_SIZE - src < PAGE_SIZE - dst) {
+ l = PAGE_SIZE - src;
src = 0;
dst += l;
} else {
- l = PAGE_CACHE_SIZE - dst;
+ l = PAGE_SIZE - dst;
src += l;
dst = 0;
}
@@ -195,11 +195,11 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
dst += node->page_offset;
if (dst > src) {
src += len - 1;
- src_page = node->page + (src >> PAGE_CACHE_SHIFT);
- src = (src & ~PAGE_CACHE_MASK) + 1;
+ src_page = node->page + (src >> PAGE_SHIFT);
+ src = (src & ~PAGE_MASK) + 1;
dst += len - 1;
- dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
- dst = (dst & ~PAGE_CACHE_MASK) + 1;
+ dst_page = node->page + (dst >> PAGE_SHIFT);
+ dst = (dst & ~PAGE_MASK) + 1;
if (src == dst) {
while (src < len) {
@@ -208,7 +208,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
set_page_dirty(*dst_page);
kunmap(*dst_page);
len -= src;
- src = PAGE_CACHE_SIZE;
+ src = PAGE_SIZE;
src_page--;
dst_page--;
}
@@ -226,32 +226,32 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
dst_ptr = kmap(*dst_page) + dst;
if (src < dst) {
l = src;
- src = PAGE_CACHE_SIZE;
+ src = PAGE_SIZE;
dst -= l;
} else {
l = dst;
src -= l;
- dst = PAGE_CACHE_SIZE;
+ dst = PAGE_SIZE;
}
l = min(len, l);
memmove(dst_ptr - l, src_ptr - l, l);
kunmap(*src_page);
set_page_dirty(*dst_page);
kunmap(*dst_page);
- if (dst == PAGE_CACHE_SIZE)
+ if (dst == PAGE_SIZE)
dst_page--;
else
src_page--;
} while ((len -= l));
}
} else {
- src_page = node->page + (src >> PAGE_CACHE_SHIFT);
- src &= ~PAGE_CACHE_MASK;
- dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
- dst &= ~PAGE_CACHE_MASK;
+ src_page = node->page + (src >> PAGE_SHIFT);
+ src &= ~PAGE_MASK;
+ dst_page = node->page + (dst >> PAGE_SHIFT);
+ dst &= ~PAGE_MASK;
if (src == dst) {
- l = min_t(int, len, PAGE_CACHE_SIZE - src);
+ l = min_t(int, len, PAGE_SIZE - src);
memmove(kmap(*dst_page) + src,
kmap(*src_page) + src, l);
kunmap(*src_page);
@@ -259,7 +259,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
kunmap(*dst_page);
while ((len -= l) != 0) {
- l = min_t(int, len, PAGE_CACHE_SIZE);
+ l = min_t(int, len, PAGE_SIZE);
memmove(kmap(*++dst_page),
kmap(*++src_page), l);
kunmap(*src_page);
@@ -272,13 +272,13 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
do {
src_ptr = kmap(*src_page) + src;
dst_ptr = kmap(*dst_page) + dst;
- if (PAGE_CACHE_SIZE - src <
- PAGE_CACHE_SIZE - dst) {
- l = PAGE_CACHE_SIZE - src;
+ if (PAGE_SIZE - src <
+ PAGE_SIZE - dst) {
+ l = PAGE_SIZE - src;
src = 0;
dst += l;
} else {
- l = PAGE_CACHE_SIZE - dst;
+ l = PAGE_SIZE - dst;
src += l;
dst = 0;
}
@@ -444,14 +444,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
mapping = tree->inode->i_mapping;
off = (loff_t)cnid << tree->node_size_shift;
- block = off >> PAGE_CACHE_SHIFT;
- node->page_offset = off & ~PAGE_CACHE_MASK;
+ block = off >> PAGE_SHIFT;
+ node->page_offset = off & ~PAGE_MASK;
for (i = 0; i < tree->pages_per_bnode; block++, i++) {
page = read_mapping_page(mapping, block, NULL);
if (IS_ERR(page))
goto fail;
if (PageError(page)) {
- page_cache_release(page);
+ put_page(page);
goto fail;
}
node->page[i] = page;
@@ -569,7 +569,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
for (i = 0; i < node->tree->pages_per_bnode; i++)
if (node->page[i])
- page_cache_release(node->page[i]);
+ put_page(node->page[i]);
kfree(node);
}
@@ -597,11 +597,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
pagep = node->page;
memset(kmap(*pagep) + node->page_offset, 0,
- min_t(int, PAGE_CACHE_SIZE, tree->node_size));
+ min_t(int, PAGE_SIZE, tree->node_size));
set_page_dirty(*pagep);
kunmap(*pagep);
for (i = 1; i < tree->pages_per_bnode; i++) {
- memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+ memset(kmap(*++pagep), 0, PAGE_SIZE);
set_page_dirty(*pagep);
kunmap(*pagep);
}
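
Context for the hunks above: this merge picks up the tree-wide rename of PAGE_CACHE_{SHIFT,MASK,SIZE} to PAGE_{SHIFT,MASK,SIZE} and of page_cache_release() to put_page(); the old names were simple aliases for the new ones, so the change is mechanical and behavior is unchanged. All of the touched code follows the same page-walking pattern: a b-tree node may span several page-cache pages, so a byte offset is split into a page index (off >> PAGE_SHIFT) and an in-page offset (off & ~PAGE_MASK), the first chunk is capped at PAGE_SIZE - off, and every later chunk at PAGE_SIZE. A minimal user-space sketch of that pattern, assuming 4 KiB pages and a plain array of page buffers (pages_read() is a hypothetical helper for illustration, not kernel code):

	/* pages_read(): hypothetical user-space analogue of hfs_bnode_read() --
	 * copy len bytes starting at byte offset off out of an array of
	 * PAGE_SIZE-sized buffers. */
	#include <stddef.h>
	#include <string.h>

	#define PAGE_SHIFT 12                 /* assumption: 4 KiB pages */
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	static void pages_read(char **pages, void *buf, size_t off, size_t len)
	{
		char **pagep = pages + (off >> PAGE_SHIFT); /* page holding off */
		size_t l;

		off &= ~PAGE_MASK;                          /* offset inside that page */
		l = len < PAGE_SIZE - off ? len : PAGE_SIZE - off;
		memcpy(buf, *pagep + off, l);               /* partial first page */
		while ((len -= l) != 0) {
			buf = (char *)buf + l;
			l = len < PAGE_SIZE ? len : PAGE_SIZE;
			memcpy(buf, *++pagep, l);           /* full middle pages, then the tail */
		}
	}

The same shape, with memmove() and a backward walk when destination and source overlap, underlies hfs_bnode_move() above.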