Commit eebd2aa3 authored by Christoph Lameter, committed by Linus Torvalds

Pagecache zeroing: zero_user_segment, zero_user_segments and zero_user

Simplify page cache zeroing of segments of pages through 3 functions

zero_user_segments(page, start1, end1, start2, end2)

        Zeros two segments of the page. It takes the positions where
        the zeroing starts and ends, which avoids length calculations
        and makes the code clearer.

zero_user_segment(page, start, end)

        Same for a single segment.

zero_user(page, start, length)

        Length variant for the case where we know the length.
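
For reference, these helpers are defined in highmem.h. A minimal
sketch of an implementation (illustrative, not necessarily the exact
committed code; the BUG_ON bounds check is an assumption):

	static inline void zero_user_segments(struct page *page,
			unsigned start1, unsigned end1,
			unsigned start2, unsigned end2)
	{
		void *kaddr = kmap_atomic(page, KM_USER0);

		BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

		if (end1 > start1)
			memset(kaddr + start1, 0, end1 - start1);
		if (end2 > start2)
			memset(kaddr + start2, 0, end2 - start2);

		kunmap_atomic(kaddr, KM_USER0);
		/* flush caches after the kmap section, see below */
		flush_dcache_page(page);
	}

	static inline void zero_user_segment(struct page *page,
			unsigned start, unsigned end)
	{
		zero_user_segments(page, start, end, 0, 0);
	}

	static inline void zero_user(struct page *page,
			unsigned start, unsigned size)
	{
		zero_user_segments(page, start, start + size, 0, 0);
	}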

We remove the zero_user_page macro. Issues:

1. It's a macro. Inline functions are preferable.

2. The KM_USER0 macro is only defined for HIGHMEM.

   Having to treat this special case everywhere makes the
   code needlessly complex. The kmap slot parameter is always
   KM_USER0, except in one single case that we open code.

Avoiding KM_USER0 means a lot of code no longer has to deal
with the special casing for HIGHMEM. Dealing with kmap is only
necessary in HIGHMEM configurations. In those configurations
we use KM_USER0, as we do for a series of other functions
defined in highmem.h.

Since KM_USER0 depends on HIGHMEM, the existing zero_user_page
could not be an inline function and had to be a macro. The
zero_user_* functions introduced here can be inline because that
constant is not used when these functions are called.

Also move the flushing of the caches (flush_dcache_page) outside of
the kmap/kunmap section.
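
The conversion at the call sites is mechanical. Schematically, an
open-coded sequence such as (variable names as in the callers below)

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

collapses to a single call with no KM_USER0 at the call site:

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);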

[akpm@linux-foundation.org: fix nfs and ntfs build]
[akpm@linux-foundation.org: fix ntfs build some more]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Steven French <sfrench@us.ibm.com>
Cc: Michael Halcrow <mhalcrow@us.ibm.com>
Cc: <linux-ext4@vger.kernel.org>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Anton Altaparmakov <aia21@cantab.net>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Cc: David Chinner <dgc@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b98348bd
@@ -1798,7 +1798,7 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
 					start = max(from, block_start);
 					size = min(to, block_end) - start;
-					zero_user_page(page, start, size, KM_USER0);
+					zero_user(page, start, size);
 					set_buffer_uptodate(bh);
 				}
@@ -1861,19 +1861,10 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 				mark_buffer_dirty(bh);
 				continue;
 			}
-			if (block_end > to || block_start < from) {
-				void *kaddr;
-
-				kaddr = kmap_atomic(page, KM_USER0);
-				if (block_end > to)
-					memset(kaddr+to, 0,
-						block_end-to);
-				if (block_start < from)
-					memset(kaddr+block_start,
-						0, from-block_start);
-				flush_dcache_page(page);
-				kunmap_atomic(kaddr, KM_USER0);
-			}
+			if (block_end > to || block_start < from)
+				zero_user_segments(page,
+					to, block_end,
+					block_start, from);
 			continue;
 		}
 	}
@@ -2104,8 +2095,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 				SetPageError(page);
 		}
 		if (!buffer_mapped(bh)) {
-			zero_user_page(page, i * blocksize, blocksize,
-					KM_USER0);
+			zero_user(page, i * blocksize, blocksize);
 			if (!err)
 				set_buffer_uptodate(bh);
 			continue;
@@ -2218,7 +2208,7 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
 						&page, &fsdata);
 		if (err)
 			goto out;
-		zero_user_page(page, zerofrom, len, KM_USER0);
+		zero_user(page, zerofrom, len);
 		err = pagecache_write_end(file, mapping, curpos, len, len,
 						page, fsdata);
 		if (err < 0)
@@ -2245,7 +2235,7 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
 						&page, &fsdata);
 		if (err)
 			goto out;
-		zero_user_page(page, zerofrom, len, KM_USER0);
+		zero_user(page, zerofrom, len);
 		err = pagecache_write_end(file, mapping, curpos, len, len,
 						page, fsdata);
 		if (err < 0)
@@ -2422,7 +2412,6 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
 	unsigned block_in_page;
 	unsigned block_start, block_end;
 	sector_t block_in_file;
-	char *kaddr;
 	int nr_reads = 0;
 	int ret = 0;
 	int is_mapped_to_disk = 1;
@@ -2493,13 +2482,8 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
 			continue;
 		}
 		if (buffer_new(bh) || !buffer_mapped(bh)) {
-			kaddr = kmap_atomic(page, KM_USER0);
-			if (block_start < from)
-				memset(kaddr+block_start, 0, from-block_start);
-			if (block_end > to)
-				memset(kaddr + to, 0, block_end - to);
-			flush_dcache_page(page);
-			kunmap_atomic(kaddr, KM_USER0);
+			zero_user_segments(page, block_start, from,
+							to, block_end);
 			continue;
 		}
 		if (buffer_uptodate(bh))
@@ -2636,7 +2620,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 out:
 	ret = mpage_writepage(page, get_block, wbc);
 	if (ret == -EAGAIN)
@@ -2709,7 +2693,7 @@ has_buffers:
 		if (page_has_buffers(page))
 			goto has_buffers;
 	}
-	zero_user_page(page, offset, length, KM_USER0);
+	zero_user(page, offset, length);
 	set_page_dirty(page);
 	err = 0;
@@ -2785,7 +2769,7 @@ int block_truncate_page(struct address_space *mapping,
 			goto unlock;
 	}
 
-	zero_user_page(page, offset, length, KM_USER0);
+	zero_user(page, offset, length);
 	mark_buffer_dirty(bh);
 	err = 0;
@@ -2831,7 +2815,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 	return __block_write_full_page(inode, page, get_block, wbc);
 }
...
@@ -1386,7 +1386,7 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
 	if (!page)
 		return -ENOMEM;
-	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 	unlock_page(page);
 	page_cache_release(page);
 	return rc;
...
@@ -878,8 +878,8 @@ do_holes:
 					page_cache_release(page);
 					goto out;
 				}
-				zero_user_page(page, block_in_page << blkbits,
-						1 << blkbits, KM_USER0);
+				zero_user(page, block_in_page << blkbits,
+						1 << blkbits);
 				dio->block_in_file++;
 				block_in_page++;
 				goto next_block;
...
@@ -257,8 +257,7 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
 	end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
 	if (to > end_byte_in_page)
 		end_byte_in_page = to;
-	zero_user_page(page, end_byte_in_page,
-		PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0);
+	zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
 out:
 	return 0;
 }
@@ -307,7 +306,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page,
 	 */
 	if ((i_size_read(page->mapping->host) == prev_page_end_size) &&
 	    (from != 0)) {
-		zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+		zero_user(page, 0, PAGE_CACHE_SIZE);
 	}
 out:
 	return rc;
...
@@ -1845,7 +1845,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
 	 */
 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
 	     ext3_should_writeback_data(inode) && PageUptodate(page)) {
-		zero_user_page(page, offset, length, KM_USER0);
+		zero_user(page, offset, length);
 		set_page_dirty(page);
 		goto unlock;
 	}
@@ -1898,7 +1898,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
 			goto unlock;
 	}
 
-	zero_user_page(page, offset, length, KM_USER0);
+	zero_user(page, offset, length);
 	BUFFER_TRACE(bh, "zeroed end of block");
 	err = 0;
...
@@ -1840,7 +1840,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
 	 */
 	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
 	     ext4_should_writeback_data(inode) && PageUptodate(page)) {
-		zero_user_page(page, offset, length, KM_USER0);
+		zero_user(page, offset, length);
 		set_page_dirty(page);
 		goto unlock;
 	}
@@ -1893,7 +1893,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
 			goto unlock;
 	}
 
-	zero_user_page(page, offset, length, KM_USER0);
+	zero_user(page, offset, length);
 	BUFFER_TRACE(bh, "zeroed end of block");
...
@@ -932,7 +932,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping)
 	if (!gfs2_is_writeback(ip))
 		gfs2_trans_add_bh(ip->i_gl, bh, 0);
 
-	zero_user_page(page, offset, length, KM_USER0);
+	zero_user(page, offset, length);
 
 unlock:
 	unlock_page(page);
...
@@ -446,7 +446,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 	 * so we need to supply one here. It doesn't happen often.
 	 */
 	if (unlikely(page->index)) {
-		zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+		zero_user(page, 0, PAGE_CACHE_SIZE);
 		return 0;
 	}
...
@@ -341,13 +341,10 @@ int simple_prepare_write(struct file *file, struct page *page,
 			unsigned from, unsigned to)
 {
 	if (!PageUptodate(page)) {
-		if (to - from != PAGE_CACHE_SIZE) {
-			void *kaddr = kmap_atomic(page, KM_USER0);
-			memset(kaddr, 0, from);
-			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
-			flush_dcache_page(page);
-			kunmap_atomic(kaddr, KM_USER0);
-		}
+		if (to - from != PAGE_CACHE_SIZE)
+			zero_user_segments(page,
+				0, from,
+				to, PAGE_CACHE_SIZE);
 	}
 	return 0;
 }
...
@@ -276,9 +276,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	}
 	if (first_hole != blocks_per_page) {
-		zero_user_page(page, first_hole << blkbits,
-				PAGE_CACHE_SIZE - (first_hole << blkbits),
-				KM_USER0);
+		zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
 		if (first_hole == 0) {
 			SetPageUptodate(page);
 			unlock_page(page);
@@ -571,8 +569,7 @@ page_is_mapped:
 		if (page->index > end_index || !offset)
 			goto confused;
 
-		zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
-				KM_USER0);
+		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 	}
 
 	/*
...
@@ -79,7 +79,7 @@ void nfs_readdata_release(void *data)
 static
 int nfs_return_empty_page(struct page *page)
 {
-	zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+	zero_user(page, 0, PAGE_CACHE_SIZE);
 	SetPageUptodate(page);
 	unlock_page(page);
 	return 0;
@@ -103,10 +103,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
 	pglen = PAGE_CACHE_SIZE - base;
 	for (;;) {
 		if (remainder <= pglen) {
-			zero_user_page(*pages, base, remainder, KM_USER0);
+			zero_user(*pages, base, remainder);
 			break;
 		}
-		zero_user_page(*pages, base, pglen, KM_USER0);
+		zero_user(*pages, base, pglen);
 		pages++;
 		remainder -= pglen;
 		pglen = PAGE_CACHE_SIZE;
@@ -130,7 +130,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 		return PTR_ERR(new);
 	}
 	if (len < PAGE_CACHE_SIZE)
-		zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
+		zero_user_segment(page, len, PAGE_CACHE_SIZE);
 	nfs_list_add_request(new, &one_request);
 	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
@@ -532,7 +532,7 @@ readpage_async_filler(void *data, struct page *page)
 		goto out_error;
 	if (len < PAGE_CACHE_SIZE)
-		zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
+		zero_user_segment(page, len, PAGE_CACHE_SIZE);
 	nfs_pageio_add_request(desc->pgio, new);
 	return 0;
 out_error:
...
@@ -665,9 +665,7 @@ zero_page:
 	 * then we need to zero any uninitalised data. */
 	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
 			&& !PageUptodate(req->wb_page))
-		zero_user_page(req->wb_page, req->wb_bytes,
-				PAGE_CACHE_SIZE - req->wb_bytes,
-				KM_USER0);
+		zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
 	return req;
 }
...
@@ -87,13 +87,17 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		/* Check for the current buffer head overflowing. */
 		if (unlikely(file_ofs + bh->b_size > init_size)) {
 			int ofs;
+			void *kaddr;
 
 			ofs = 0;
 			if (file_ofs < init_size)
 				ofs = init_size - file_ofs;
 			local_irq_save(flags);
-			zero_user_page(page, bh_offset(bh) + ofs,
-					bh->b_size - ofs, KM_BIO_SRC_IRQ);
+			kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+			memset(kaddr + bh_offset(bh) + ofs, 0,
+					bh->b_size - ofs);
+			flush_dcache_page(page);
+			kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
 			local_irq_restore(flags);
 		}
 	} else {
@@ -334,7 +338,7 @@ handle_hole:
 		bh->b_blocknr = -1UL;
 		clear_buffer_mapped(bh);
 handle_zblock:
-		zero_user_page(page, i * blocksize, blocksize, KM_USER0);
+		zero_user(page, i * blocksize, blocksize);
 		if (likely(!err))
 			set_buffer_uptodate(bh);
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
@@ -410,7 +414,7 @@ retry_readpage:
 	/* Is the page fully outside i_size? (truncate in progress) */
 	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
 			PAGE_CACHE_SHIFT)) {
-		zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+		zero_user(page, 0, PAGE_CACHE_SIZE);
 		ntfs_debug("Read outside i_size - truncated?");
 		goto done;
 	}
@@ -459,7 +463,7 @@ retry_readpage:
 	 * ok to ignore the compressed flag here.
 	 */
 	if (unlikely(page->index > 0)) {
-		zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+		zero_user(page, 0, PAGE_CACHE_SIZE);
 		goto done;
 	}
 	if (!NInoAttr(ni))
@@ -788,8 +792,7 @@ lock_retry_remap:
 			if (err == -ENOENT || lcn == LCN_ENOENT) {
 				bh->b_blocknr = -1;
 				clear_buffer_dirty(bh);
-				zero_user_page(page, bh_offset(bh), blocksize,
-						KM_USER0);
+				zero_user(page, bh_offset(bh), blocksize);
 				set_buffer_uptodate(bh);
 				err = 0;
 				continue;
@@ -1414,8 +1417,7 @@ retry_writepage:
 	if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
 		/* The page straddles i_size. */
 		unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
-		zero_user_page(page, ofs, PAGE_CACHE_SIZE - ofs,
-				KM_USER0);
+		zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
 	}
 	/* Handle mst protected attributes. */
 	if (NInoMstProtected(ni))
...
@@ -565,7 +565,7 @@ int ntfs_read_compressed_block(struct page *page)
 		if (xpage >= max_page) {
 			kfree(bhs);
 			kfree(pages);
-			zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+			zero_user(page, 0, PAGE_CACHE_SIZE);
 			ntfs_debug("Compressed read outside i_size - truncated?");
 			SetPageUptodate(page);
 			unlock_page(page);
...
@@ -607,8 +607,8 @@ do_next_page:
 				ntfs_submit_bh_for_read(bh);
 				*wait_bh++ = bh;
 			} else {
-				zero_user_page(page, bh_offset(bh),
-						blocksize, KM_USER0);
+				zero_user(page, bh_offset(bh),
+						blocksize);
 				set_buffer_uptodate(bh);
 			}
 		}
@@ -683,9 +683,8 @@ map_buffer_cached:
 					ntfs_submit_bh_for_read(bh);
 					*wait_bh++ = bh;
 				} else {
-					zero_user_page(page,
-							bh_offset(bh),
-							blocksize, KM_USER0);
+					zero_user(page, bh_offset(bh),
+							blocksize);
 					set_buffer_uptodate(bh);
 				}
 			}
@@ -703,8 +702,8 @@ map_buffer_cached:
 			 */
 			if (bh_end <= pos || bh_pos >= end) {
 				if (!buffer_uptodate(bh)) {
-					zero_user_page(page, bh_offset(bh),
-							blocksize, KM_USER0);
+					zero_user(page, bh_offset(bh),
+							blocksize);
 					set_buffer_uptodate(bh);
 				}
 				mark_buffer_dirty(bh);
@@ -743,8 +742,7 @@ map_buffer_cached:
 				if (!buffer_uptodate(bh))
 					set_buffer_uptodate(bh);
 			} else if (!buffer_uptodate(bh)) {
-				zero_user_page(page, bh_offset(bh), blocksize,
-						KM_USER0);
+				zero_user(page, bh_offset(bh), blocksize);
 				set_buffer_uptodate(bh);
 			}
 			continue;
@@ -868,8 +866,8 @@ rl_not_mapped_enoent: