Commit 606c61a0 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge fourth patch-bomb from Andrew Morton:
 "A lot more stuff than expected, sorry.  A bunch of ocfs2 reviewing was
  finished off.

   - mhocko's oom-reaper out-of-memory-handler changes

   - ocfs2 fixes and features

   - KASAN feature work

   - various fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (42 commits)
  thp: fix typo in khugepaged_scan_pmd()
  MAINTAINERS: fill entries for KASAN
  mm/filemap: generic_file_read_iter(): check for zero reads unconditionally
  kasan: test fix: warn if the UAF could not be detected in kmalloc_uaf2
  mm, kasan: stackdepot implementation. Enable stackdepot for SLAB
  arch, ftrace: for KASAN put hard/soft IRQ entries into separate sections
  mm, kasan: add GFP flags to KASAN API
  mm, kasan: SLAB support
  kasan: modify kmalloc_large_oob_right(), add kmalloc_pagealloc_oob_right()
  include/linux/oom.h: remove undefined oom_kills_count()/note_oom_kill()
  mm/page_alloc: prevent merging between isolated and other pageblocks
  drivers/memstick/host/r592.c: avoid gcc-6 warning
  ocfs2: extend enough credits for freeing one truncate record while replaying truncate records
  ocfs2: extend transaction for ocfs2_remove_rightmost_path() and ocfs2_update_edge_lengths() before to avoid inconsistency between inode and et
  ocfs2/dlm: move lock to the tail of grant queue while doing in-place convert
  ocfs2: solve a problem of crossing the boundary in updating backups
  ocfs2: fix occurring deadlock by changing ocfs2_wq from global to local
  ocfs2/dlm: fix BUG in dlm_move_lockres_to_recovery_list
  ocfs2/dlm: fix race between convert and recovery
  ocfs2: fix a deadlock issue in ocfs2_dio_end_io_write()
  ...
parents 15dbc136 0fda2788
......@@ -12,8 +12,7 @@ KASAN uses compile-time instrumentation for checking every memory access,
therefore you will need a GCC version 4.9.2 or later. GCC 5.0 or later is
required for detection of out-of-bounds accesses to stack or global variables.
Currently KASAN is supported only for x86_64 architecture and requires the
kernel to be built with the SLUB allocator.
Currently KASAN is supported only for x86_64 architecture.
1. Usage
========
......@@ -27,7 +26,7 @@ inline are compiler instrumentation types. The former produces smaller binary
the latter is 1.1 - 2 times faster. Inline instrumentation requires a GCC
version 5.0 or later.
Currently KASAN works only with the SLUB memory allocator.
KASAN works with both SLUB and SLAB memory allocators.
For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
To disable instrumentation for specific files or directories, add a line
......
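As a side note on the kasan.txt hunk above: the sketch below is an illustration only, not part of this patch series, and its function names are made up. It shows the kind of slab use-after-free that KASAN is meant to report, in the spirit of the kmalloc_uaf* cases in lib/test_kasan.c covered by the new MAINTAINERS entry, and it assumes an out-of-tree module built against a kernel configured with CONFIG_KASAN=y.

/*
 * Hypothetical demo module (illustration only): trigger a slab
 * use-after-free so that a CONFIG_KASAN=y kernel emits a report.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init kasan_uaf_demo_init(void)
{
	char *p = kmalloc(128, GFP_KERNEL);

	if (!p)
		return -ENOMEM;

	kfree(p);
	/* Read after kfree(): KASAN detects and reports this access. */
	pr_info("kasan uaf demo read: %c\n", p[64]);
	return 0;
}

static void __exit kasan_uaf_demo_exit(void)
{
}

module_init(kasan_uaf_demo_init);
module_exit(kasan_uaf_demo_exit);
MODULE_LICENSE("GPL");

To exempt a particular object file from instrumentation, the kernel convention is a KASAN_SANITIZE_<object>.o := n line in the corresponding Makefile, as one of the Makefile hunks below does for stacktrace.o.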
......@@ -6165,6 +6165,20 @@ S: Maintained
F: Documentation/hwmon/k8temp
F: drivers/hwmon/k8temp.c
KASAN
M: Andrey Ryabinin <aryabinin@virtuozzo.com>
R: Alexander Potapenko <glider@google.com>
R: Dmitry Vyukov <dvyukov@google.com>
L: kasan-dev@googlegroups.com
S: Maintained
F: arch/*/include/asm/kasan.h
F: arch/*/mm/kasan_init*
F: Documentation/kasan.txt
F: include/linux/kasan.h
F: lib/test_kasan.c
F: mm/kasan/
F: scripts/Makefile.kasan
KCONFIG
M: "Yann E. MORIN" <yann.morin.1998@free.fr>
L: linux-kbuild@vger.kernel.org
......
......@@ -7,7 +7,7 @@
#ifndef __ASM_ARM_EXCEPTION_H
#define __ASM_ARM_EXCEPTION_H
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
......
......@@ -108,6 +108,7 @@ SECTIONS
*(.exception.text)
__exception_text_end = .;
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
......
......@@ -18,7 +18,7 @@
#ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
......
......@@ -103,6 +103,7 @@ SECTIONS
*(.exception.text)
__exception_text_end = .;
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
......
......@@ -35,6 +35,7 @@ SECTIONS
#endif
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
KPROBES_TEXT
#ifdef CONFIG_ROMKERNEL
__sinittext = .;
......
......@@ -72,6 +72,7 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
......
......@@ -24,6 +24,7 @@ SECTIONS
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.text.*)
*(.gnu.warning)
}
......
......@@ -36,6 +36,7 @@ SECTIONS {
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
. = ALIGN (4) ;
_etext = . ;
}
......
......@@ -58,6 +58,7 @@ SECTIONS
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.text.*)
*(.fixup)
*(.gnu.warning)
......
......@@ -39,6 +39,7 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
KPROBES_TEXT
} =0
_etext = .;
......
......@@ -50,6 +50,7 @@ SECTIONS
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup)
*(.text.__*)
_etext = .;
......
......@@ -72,6 +72,7 @@ SECTIONS
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.text.do_softirq)
*(.text.sys_exit)
*(.text.do_sigaltstack)
......
......@@ -55,6 +55,7 @@ SECTIONS
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
#ifdef CONFIG_PPC32
*(.got1)
......
......@@ -28,6 +28,7 @@ SECTIONS
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
} :text = 0x0700
......
......@@ -39,6 +39,7 @@ SECTIONS
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
_etext = .; /* End of text section */
......
......@@ -48,6 +48,7 @@ SECTIONS
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.gnu.warning)
} = 0
_etext = .;
......
......@@ -45,6 +45,7 @@ SECTIONS
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
__fix_text_end = .; /* tile-cpack won't rearrange before this */
ALIGN_FUNCTION();
*(.hottext*)
......
......@@ -19,6 +19,7 @@ endif
KASAN_SANITIZE_head$(BITS).o := n
KASAN_SANITIZE_dumpstack.o := n
KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
......
......@@ -101,6 +101,7 @@ SECTIONS
KPROBES_TEXT
ENTRY_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
/* End of text section */
......
......@@ -17,7 +17,7 @@
int input_event_from_user(const char __user *buffer,
struct input_event *event)
{
if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
if (copy_from_user(&compat_event, buffer,
......@@ -41,7 +41,7 @@ int input_event_from_user(const char __user *buffer,
int input_event_to_user(char __user *buffer,
const struct input_event *event)
{
if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
compat_event.time.tv_sec = event->time.tv_sec;
......@@ -65,7 +65,7 @@ int input_event_to_user(char __user *buffer,
int input_ff_effect_from_user(const char __user *buffer, size_t size,
struct ff_effect *effect)
{
if (INPUT_COMPAT_TEST) {
if (in_compat_syscall()) {
struct ff_effect_compat *compat_effect;
if (size != sizeof(struct ff_effect_compat))
......
......@@ -17,8 +17,6 @@
#ifdef CONFIG_COMPAT
#define INPUT_COMPAT_TEST in_compat_syscall()
struct input_event_compat {
struct compat_timeval time;
__u16 type;
......@@ -57,7 +55,7 @@ struct ff_effect_compat {
static inline size_t input_event_size(void)
{
return (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) ?
return (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) ?
sizeof(struct input_event_compat) : sizeof(struct input_event);
}
......
......@@ -1015,7 +1015,7 @@ static int input_bits_to_string(char *buf, int buf_size,
{
int len = 0;
if (INPUT_COMPAT_TEST) {
if (in_compat_syscall()) {
u32 dword = bits >> 32;
if (dword || !skip_empty)
len += snprintf(buf, buf_size, "%x ", dword);
......
......@@ -664,7 +664,7 @@ struct uinput_ff_upload_compat {
static int uinput_ff_upload_to_user(char __user *buffer,
const struct uinput_ff_upload *ff_up)
{
if (INPUT_COMPAT_TEST) {
if (in_compat_syscall()) {
struct uinput_ff_upload_compat ff_up_compat;
ff_up_compat.request_id = ff_up->request_id;
......@@ -695,7 +695,7 @@ static int uinput_ff_upload_to_user(char __user *buffer,
static int uinput_ff_upload_from_user(const char __user *buffer,
struct uinput_ff_upload *ff_up)
{
if (INPUT_COMPAT_TEST) {
if (in_compat_syscall()) {
struct uinput_ff_upload_compat ff_up_compat;
if (copy_from_user(&ff_up_compat, buffer,
......
......@@ -298,8 +298,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev)
sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
if (sg_count != 1 ||
(sg_dma_len(&dev->req->sg) < dev->req->sg.length)) {
if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) {
message("problem in dma_map_sg");
return -EIO;
}
......
......@@ -2516,21 +2516,6 @@ static int ocfs2_update_edge_lengths(handle_t *handle,
struct ocfs2_extent_block *eb;
u32 range;
/*
* In normal tree rotation process, we will never touch the
* tree branch above subtree_index and ocfs2_extend_rotate_transaction
* doesn't reserve the credits for them either.
*
* But we do have a special case here which will update the rightmost
* records for all the bh in the path.
* So we have to allocate extra credits and access them.
*/
ret = ocfs2_extend_trans(handle, subtree_index);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_path(et->et_ci, handle, path);
if (ret) {
mlog_errno(ret);
......@@ -2956,7 +2941,7 @@ static int __ocfs2_rotate_tree_left(handle_t *handle,
right_path->p_node[subtree_root].bh->b_blocknr,
right_path->p_tree_depth);
ret = ocfs2_extend_rotate_transaction(handle, subtree_root,
ret = ocfs2_extend_rotate_transaction(handle, 0,
orig_credits, left_path);
if (ret) {
mlog_errno(ret);
......@@ -3029,21 +3014,9 @@ static int ocfs2_remove_rightmost_path(handle_t *handle,
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
ret = ocfs2_et_sanity_check(et);
if (ret)
goto out;
/*
* There's two ways we handle this depending on
* whether path is the only existing one.
*/
ret = ocfs2_extend_rotate_transaction(handle, 0,
handle->h_buffer_credits,
path);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_path(et->et_ci, handle, path);
if (ret) {
......@@ -3641,6 +3614,14 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
*/
if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 &&
le16_to_cpu(el->l_next_free_rec) == 1) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
handle->h_buffer_credits,
right_path);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_remove_rightmost_path(handle, et,
right_path,
......@@ -3679,6 +3660,14 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
BUG_ON(ctxt->c_contig_type == CONTIG_NONE);
if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
handle->h_buffer_credits,
path);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* The merge code will need to create an empty
* extent to take the place of the newly
......@@ -3727,6 +3716,15 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
*/
BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0]));
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
handle->h_buffer_credits,
path);
if (ret) {
mlog_errno(ret);
goto out;
}
/* The merge left us with an empty extent, remove it. */
ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
if (ret) {
......@@ -3748,6 +3746,15 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
goto out;
}
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
handle->h_buffer_credits,
path);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
/*
* Error from this last rotate is not critical, so
......@@ -3783,6 +3790,16 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
}
if (ctxt->c_split_covers_rec) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
handle->h_buffer_credits,
path);
if (ret) {
mlog_errno(ret);
ret = 0;
goto out;
}
/*
* The merge may have left an empty extent in
* our leaf. Try to rotate it away.
......@@ -5342,6 +5359,15 @@ static int ocfs2_truncate_rec(handle_t *handle,
struct ocfs2_extent_block *eb;
if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
handle->h_buffer_credits,
path);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
if (ret) {
mlog_errno(ret);
......@@ -5928,16 +5954,6 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
ocfs2_journal_dirty(handle, tl_bh);
/* TODO: Perhaps we can calculate the bulk of the
* credits up front rather than extending like
* this. */
status = ocfs2_extend_trans(handle,
OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
if (status < 0) {
mlog_errno(status);
goto bail;
}
rec = tl->tl_recs[i];
start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb,
le32_to_cpu(rec.t_start));
......@@ -5958,6 +5974,13 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
goto bail;
}
}
status = ocfs2_extend_trans(handle,
OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
if (status < 0) {
mlog_errno(status);
goto bail;
}
i--;
}
......@@ -6016,7 +6039,7 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
goto out_mutex;
}
handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
mlog_errno(status);
......@@ -6079,7 +6102,7 @@ void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
if (cancel)
cancel_delayed_work(&osb->osb_truncate_log_wq);
queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq,
queue_delayed_work(osb->ocfs2_wq, &osb->osb_truncate_log_wq,
OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL);
}
}
......@@ -6253,7 +6276,7 @@ void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
if (tl_inode) {
cancel_delayed_work(&osb->osb_truncate_log_wq);
flush_workqueue(ocfs2_wq);
flush_workqueue(osb->ocfs2_wq);
status = ocfs2_flush_truncate_log(osb);
if (status < 0)
......
......@@ -47,9 +47,14 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
int ocfs2_write_begin_nolock(struct file *filp,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
typedef enum {
OCFS2_WRITE_BUFFER = 0,
OCFS2_WRITE_DIRECT,
OCFS2_WRITE_MMAP,
} ocfs2_write_type_t;
int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, ocfs2_write_type_t type,
struct page **pagep, void **fsdata,
struct buffer_head *di_bh, struct page *mmap_page);
......@@ -79,7 +84,6 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
enum ocfs2_iocb_lock_bits {
OCFS2_IOCB_RW_LOCK = 0,
OCFS2_IOCB_RW_LOCK_LEVEL,
OCFS2_IOCB_UNALIGNED_IO,
OCFS2_IOCB_NUM_LOCKS
};
......@@ -88,11 +92,4 @@ enum ocfs2_iocb_lock_bits {
#define ocfs2_iocb_rw_locked_level(iocb) \
test_bit(OCFS2_IOCB_RW_LOCK_LEVEL, (unsigned long *)&iocb->private)
#define ocfs2_iocb_set_unaligned_aio(iocb) \
set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
#define ocfs2_iocb_clear_unaligned_aio(iocb) \
clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
#define ocfs2_iocb_is_unaligned_aio(iocb) \
test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
#endif /* OCFS2_FILE_H */
......@@ -1444,8 +1444,8 @@ static void o2hb_region_release(struct config_item *item)
debugfs_remove(reg->hr_debug_dir);
kfree(reg->hr_db_livenodes);
kfree(reg->hr_db_regnum);
kfree(reg->hr_debug_elapsed_time);
kfree(reg->hr_debug_pinned);
kfree(reg->hr_db_elapsed_time);
kfree(reg->hr_db_pinned);
spin_lock(&o2hb_live_lock);
list_del(&reg->hr_all_item);
......
......@@ -212,6 +212,12 @@ grant:
if (lock->lksb->flags & DLM_LKSB_PUT_LVB)
memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN);
/*
* Move the lock to the tail because it may be the only lock which has
* an invalid lvb.
*/
list_move_tail(&lock->list, &res->granted);
status = DLM_NORMAL;
*call_ast = 1;
goto unlock_exit;
......@@ -262,6 +268,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
struct dlm_lock *lock, int flags, int type)
{
enum dlm_status status;
u8 old_owner = res->owner;
mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
......@@ -287,6 +294,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
status = DLM_DENIED;
goto bail;
}
if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
mlog(0, "last convert request returned DLM_RECOVERING, but "
"owner has already queued and sent ast to me. res %.*s, "
"(cookie=%u:%llu, type=%d, conv=%d)\n",
res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
lock->ml.type, lock->ml.convert_type);
status = DLM_NORMAL;
goto bail;
}
res->state |= DLM_LOCK_RES_IN_PROGRESS;
/* move lock to local convert queue */
/* do not alter lock refcount. switching lists. */
......@@ -316,11 +336,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
lock->convert_pending = 0;
/* if it failed, move it back to granted queue */
/* if it failed, move it back to granted queue.
* if master returns DLM_NORMAL and then down before sending ast,
* it may have already been moved to granted queue, reset to
* DLM_RECOVERING and retry convert */
if (status != DLM_NORMAL) {
if (status != DLM_NOTQUEUED)
dlm_error(status);
dlm_revert_pending_convert(res, lock);
} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
(old_owner != res->owner)) {
mlog(0, "res %.*s is in recovering or has been recovered.\n",
res->lockname.len, res->lockname.name);
status = DLM_RECOVERING;
}
bail:
spin_unlock(&res->spinlock);
......
......@@ -2083,7 +2083,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
dlm_lock_get(lock);
if (lock->convert_pending) {
/* move converting lock back to granted */
BUG_ON(i != DLM_CONVERTING_LIST);
mlog(0, "node died with convert pending "
"on %.*s. move back to granted list.\n",
res->lockname.len, res->lockname.name);
......
......@@ -1381,44 +1381,6 @@ out:
return ret;
}
/*
* Will look for holes and unwritten extents in the range starting at
* pos for count bytes (inclusive).
*/
static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
size_t count)
{
int ret = 0;
unsigned int extent_flags;
u32 cpos, clusters, extent_len, phys_cpos;
struct super_block *sb = inode->i_sb;
cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
while (clusters) {
ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
&extent_flags);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
ret = 1;
break;
}
if (extent_len > clusters)
extent_len = clusters;
clusters -= extent_len;
cpos += extent_len;
}
out:
return ret;
}
static int ocfs2_write_remove_suid(struct inode *inode)
{
int ret;
......@@ -2129,18 +2091,12 @@ out:
static int ocfs2_prepare_inode_for_write(struct file *file,
loff_t pos,
size_t count,
int appending,
int *direct_io,
int *has_refcount)
size_t count)
{
int ret = 0, meta_level = 0;
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = d_inode(dentry);
loff_t end;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int full_coherency = !(osb->s_mount_opt &
OCFS2_MOUNT_COHERENCY_BUFFERED);
/*
* We start with a read level meta lock and only jump to an ex
......@@ -2189,10 +2145,6 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
pos,
count,
&meta_level);
if (has_refcount)
*has_refcount = 1;
if (direct_io)
*direct_io = 0;
}
if (ret < 0) {
......@@ -2200,67 +2152,12 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
goto out_unlock;
}
/*
* Skip the O_DIRECT checks if we don't need
* them.
*/
if (!direct_io || !(*direct_io))
break;
/*
* There's no sane way to do direct writes to an inode
* with inline data.
*/
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
*direct_io = 0;
break;
}
/*
* Allowing concurrent direct writes means
* i_size changes wouldn't be synchronized, so
* one node could wind up truncating another
* nodes writes.
*/
if (end > i_size_read(inode) && !full_coherency) {
*direct_io = 0;
break;
}
/*
* Fallback to old way if the feature bit is not set.
*/