@@ -568,7 +568,6 @@ static void afs_extend_writeback(struct address_space *mapping,
}
folio_batch_release(&fbatch);
- cond_resched();
} while (!stop);
*_len = len;
@@ -790,7 +789,6 @@ static int afs_writepages_region(struct address_space *mapping,
}
folio_batch_release(&fbatch);
- cond_resched();
} while (wbc->nr_to_write > 0);
*_next = start;
@@ -823,7 +823,6 @@ static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);
ulist_reinit(parents);
- cond_resched();
}
out:
/*
@@ -879,7 +878,6 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
- cond_resched();
}
return 0;
}
@@ -1676,7 +1674,6 @@ static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
*/
ref->inode_list = NULL;
}
- cond_resched();
}
out:
@@ -1784,7 +1781,6 @@ static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
if (!node)
break;
ctx->bytenr = node->val;
- cond_resched();
}
ulist_free(ctx->refs);
@@ -1993,7 +1989,6 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
}
shared.share_count = 0;
shared.have_delayed_delete_refs = false;
- cond_resched();
}
/*
@@ -3424,7 +3419,6 @@ int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int type;
- cond_resched();
eb = btrfs_backref_get_eb(iter);
key.objectid = iter->bytenr;
@@ -769,7 +769,6 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
btrfs_release_path(path);
up_read(&fs_info->commit_root_sem);
mutex_unlock(&caching_ctl->mutex);
- cond_resched();
mutex_lock(&caching_ctl->mutex);
down_read(&fs_info->commit_root_sem);
goto next;
@@ -4066,8 +4065,6 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
wait_for_alloc = false;
spin_unlock(&space_info->lock);
}
-
- cond_resched();
} while (wait_for_alloc);
mutex_lock(&fs_info->chunk_mutex);
@@ -5052,7 +5052,6 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
*/
free_extent_buffer(next);
btrfs_release_path(path);
- cond_resched();
goto again;
}
if (!ret)
@@ -1326,7 +1326,6 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
ret = 0;
break;
}
- cond_resched();
}
if (ra_allocated)
@@ -4561,7 +4561,6 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
spin_unlock(&fs_info->ordered_root_lock);
btrfs_destroy_ordered_extents(root);
- cond_resched();
spin_lock(&fs_info->ordered_root_lock);
}
spin_unlock(&fs_info->ordered_root_lock);
@@ -4643,7 +4642,6 @@ static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
}
btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
btrfs_put_delayed_ref_head(head);
- cond_resched();
spin_lock(&delayed_refs->lock);
}
btrfs_qgroup_destroy_extent_records(trans);
@@ -4759,7 +4757,6 @@ static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
free_extent_state(cached_state);
btrfs_error_unpin_extent_range(fs_info, start, end);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- cond_resched();
}
}
@@ -695,8 +695,6 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
if (start > end)
goto out;
spin_unlock(&tree->lock);
- if (gfpflags_allow_blocking(mask))
- cond_resched();
goto again;
out:
@@ -1189,8 +1187,6 @@ static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
if (start > end)
goto out;
spin_unlock(&tree->lock);
- if (gfpflags_allow_blocking(mask))
- cond_resched();
goto again;
out:
@@ -1409,7 +1405,6 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
if (start > end)
goto out;
spin_unlock(&tree->lock);
- cond_resched();
first_iteration = false;
goto again;
@@ -1996,7 +1996,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
}
btrfs_put_delayed_ref(ref);
- cond_resched();
spin_lock(&locked_ref->lock);
btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
@@ -2074,7 +2073,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
*/
locked_ref = NULL;
- cond_resched();
} while ((nr != -1 && count < nr) || locked_ref);
return 0;
@@ -2183,7 +2181,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
- cond_resched();
goto again;
}
out:
@@ -2805,7 +2802,6 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
unpin_extent_range(fs_info, start, end, true);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
free_extent_state(cached_state);
- cond_resched();
}
if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
@@ -4416,7 +4412,6 @@ static noinline int find_free_extent(struct btrfs_root *root,
goto have_block_group;
}
release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc);
- cond_resched();
}
up_read(&space_info->groups_sem);
@@ -5037,7 +5032,6 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
if (nread >= wc->reada_count)
break;
- cond_resched();
bytenr = btrfs_node_blockptr(eb, slot);
generation = btrfs_node_ptr_generation(eb, slot);
@@ -6039,8 +6033,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
ret = -ERESTARTSYS;
break;
}
-
- cond_resched();
}
return ret;
@@ -227,7 +227,6 @@ static void __process_pages_contig(struct address_space *mapping,
page_ops, start, end);
}
folio_batch_release(&fbatch);
- cond_resched();
}
}
@@ -291,7 +290,6 @@ static noinline int lock_delalloc_pages(struct inode *inode,
processed_end = page_offset(page) + PAGE_SIZE - 1;
}
folio_batch_release(&fbatch);
- cond_resched();
}
return 0;
@@ -401,7 +399,6 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
&cached_state);
__unlock_for_delalloc(inode, locked_page,
delalloc_start, delalloc_end);
- cond_resched();
goto again;
}
free_extent_state(cached_state);
@@ -1924,7 +1921,6 @@ int btree_write_cache_pages(struct address_space *mapping,
nr_to_write_done = wbc->nr_to_write <= 0;
}
folio_batch_release(&fbatch);
- cond_resched();
}
if (!scanned && !done) {
/*
@@ -2116,7 +2112,6 @@ static int extent_write_cache_pages(struct address_space *mapping,
wbc->nr_to_write <= 0);
}
folio_batch_release(&fbatch);
- cond_resched();
}
if (!scanned && !done) {
/*
@@ -2397,8 +2392,6 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
/* once for us */
free_extent_map(em);
-
- cond_resched(); /* Allow large-extent preemption. */
}
}
return try_release_extent_state(tree, page, mask);
@@ -2698,7 +2691,6 @@ static int fiemap_process_hole(struct btrfs_inode *inode,
last_delalloc_end = delalloc_end;
cur_offset = delalloc_end + 1;
extent_offset += cur_offset - delalloc_start;
- cond_resched();
}
/*
@@ -2986,7 +2978,6 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
/* No more file extent items for this inode. */
break;
}
- cond_resched();
}
check_eof_delalloc:
@@ -1252,7 +1252,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(path->nodes[0]);
if (total_bytes < sums->len) {
btrfs_release_path(path);
- cond_resched();
goto again;
}
out:
@@ -1405,8 +1405,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
- cond_resched();
-
pos += copied;
num_written += copied;
}
@@ -3376,7 +3374,6 @@ bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
prev_delalloc_end = delalloc_end;
cur_offset = delalloc_end + 1;
- cond_resched();
}
return ret;
@@ -3654,7 +3651,6 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
ret = -EINTR;
goto out;
}
- cond_resched();
}
/* We have an implicit hole from the last extent found up to i_size. */
@@ -3807,8 +3807,6 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group,
ret = -ERESTARTSYS;
break;
}
-
- cond_resched();
}
return ret;
@@ -4000,8 +3998,6 @@ static int trim_bitmaps(struct btrfs_block_group *block_group,
ret = -ERESTARTSYS;
break;
}
-
- cond_resched();
}
if (offset >= end)
@@ -1021,7 +1021,6 @@ static void compress_file_range(struct btrfs_work *work)
nr_pages, compress_type);
if (start + total_in < end) {
start += total_in;
- cond_resched();
goto again;
}
return;
@@ -3376,7 +3375,6 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
run_delayed_iput_locked(fs_info, inode);
if (need_resched()) {
spin_unlock_irq(&fs_info->delayed_iput_lock);
- cond_resched();
spin_lock_irq(&fs_info->delayed_iput_lock);
}
}
@@ -4423,7 +4421,6 @@ static void btrfs_prune_dentries(struct btrfs_root *root)
* cache when its usage count hits zero.
*/
iput(inode);
- cond_resched();
spin_lock(&root->inode_lock);
goto again;
}
@@ -5135,7 +5132,6 @@ static void evict_inode_truncate_pages(struct inode *inode)
EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
&cached_state);
- cond_resched();
spin_lock(&io_tree->lock);
}
spin_unlock(&io_tree->lock);
@@ -7209,8 +7205,6 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
if (ret)
break;
-
- cond_resched();
}
return ret;
@@ -9269,7 +9263,6 @@ static int start_delalloc_inodes(struct btrfs_root *root,
if (ret || wbc->nr_to_write <= 0)
goto out;
}
- cond_resched();
spin_lock(&root->delalloc_lock);
}
spin_unlock(&root->delalloc_lock);
@@ -10065,7 +10058,6 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
break;
btrfs_put_ordered_extent(ordered);
unlock_extent(io_tree, start, lockend, &cached_state);
- cond_resched();
}
em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1);
@@ -10306,7 +10298,6 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
if (ordered)
btrfs_put_ordered_extent(ordered);
unlock_extent(io_tree, start, end, &cached_state);
- cond_resched();
}
/*
@@ -715,7 +715,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
list_add_tail(&ordered->work_list, &works);
btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
- cond_resched();
spin_lock(&root->ordered_extent_lock);
if (nr != U64_MAX)
nr--;
@@ -729,7 +728,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
list_del_init(&ordered->work_list);
wait_for_completion(&ordered->completion);
btrfs_put_ordered_extent(ordered);
- cond_resched();
}
mutex_unlock(&root->ordered_extent_mutex);
@@ -1926,7 +1926,6 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
if (ret)
return ret;
}
- cond_resched();
return 0;
}
@@ -569,8 +569,6 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
ret = -EINTR;
goto out;
}
-
- cond_resched();
}
ret = 0;
@@ -1094,7 +1094,6 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
for (i = 0; i < nritems; i++) {
struct btrfs_ref ref = { 0 };
- cond_resched();
btrfs_item_key_to_cpu(leaf, &key, i);
if (key.type != BTRFS_EXTENT_DATA_KEY)
continue;
@@ -1531,7 +1530,6 @@ static int invalidate_extent_cache(struct btrfs_root *root,
while (1) {
struct extent_state *cached_state = NULL;
- cond_resched();
iput(inode);
if (objectid > max_key->objectid)
@@ -2163,7 +2161,6 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
next = node;
while (1) {
- cond_resched();
next = walk_up_backref(next, edges, &index);
root = next->root;
@@ -2286,7 +2283,6 @@ struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
next = node;
while (1) {
- cond_resched();
next = walk_up_backref(next, edges, &index);
root = next->root;
@@ -2331,7 +2327,6 @@ u64 calcu_metadata_size(struct reloc_control *rc,
BUG_ON(reserve && node->processed);
while (next) {
- cond_resched();
while (1) {
if (next->processed && (reserve || next != node))
break;
@@ -2426,8 +2421,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
list_for_each_entry(edge, &node->upper, list[LOWER]) {
struct btrfs_ref ref = { 0 };
- cond_resched();
-
upper = edge->node[UPPER];
root = select_reloc_root(trans, rc, upper, edges);
if (IS_ERR(root)) {
@@ -2609,7 +2602,6 @@ static void update_processed_blocks(struct reloc_control *rc,
int index = 0;
while (next) {
- cond_resched();
while (1) {
if (next->processed)
break;
@@ -3508,7 +3500,6 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
while (1) {
bool block_found;
- cond_resched();
if (rc->search_start >= last) {
ret = 1;
break;
@@ -2046,9 +2046,6 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
break;
cur_logical = found_logical + BTRFS_STRIPE_LEN;
-
- /* Don't hold CPU for too long time */
- cond_resched();
}
return ret;
}
@@ -7778,7 +7778,6 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
if (need_resched() ||
rwsem_is_contended(&fs_info->commit_root_sem)) {
up_read(&fs_info->commit_root_sem);
- cond_resched();
down_read(&fs_info->commit_root_sem);
}
@@ -1211,7 +1211,6 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
if (!to_reclaim)
to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
flush_space(fs_info, space_info, to_reclaim, flush, true);
- cond_resched();
spin_lock(&space_info->lock);
}
@@ -45,7 +45,6 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
folio_put(folio);
}
folio_batch_release(&fbatch);
- cond_resched();
loops++;
if (loops > 100000) {
printk(KERN_ERR
@@ -1115,7 +1115,6 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
werr = filemap_fdatawait_range(mapping, start, end);
free_extent_state(cached_state);
cached_state = NULL;
- cond_resched();
start = end + 1;
}
return werr;
@@ -1157,7 +1156,6 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
werr = err;
free_extent_state(cached_state);
cached_state = NULL;
- cond_resched();
start = end + 1;
}
if (err)
@@ -1507,7 +1505,6 @@ int btrfs_defrag_root(struct btrfs_root *root)
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(info);
- cond_resched();
if (btrfs_fs_closing(info) || ret != -EAGAIN)
break;
@@ -2657,11 +2657,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
path->nodes[*level-1] = next;
*level = btrfs_header_level(next);
path->slots[*level] = 0;
- cond_resched();
}
path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
- cond_resched();
return 0;
}
@@ -3898,7 +3896,6 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
}
if (need_resched()) {
btrfs_release_path(path);
- cond_resched();
goto search;
}
}
@@ -5037,7 +5034,6 @@ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
ins_nr++;
path->slots[0]++;
found_xattrs = true;
- cond_resched();
}
if (ins_nr > 0) {
ret = copy_items(trans, inode, dst_path, path,
@@ -5135,7 +5131,6 @@ static int btrfs_log_holes(struct btrfs_trans_handle *trans,
prev_extent_end = btrfs_file_extent_end(path);
path->slots[0]++;
- cond_resched();
}
if (prev_extent_end < i_size) {
@@ -5919,13 +5914,6 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
} else {
break;
}
-
- /*
- * We may process many leaves full of items for our inode, so
- * avoid monopolizing a cpu for too long by rescheduling while
- * not holding locks on any tree.
- */
- cond_resched();
}
if (ins_nr) {
ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
@@ -324,7 +324,6 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info)
ret = -EINTR;
goto out;
}
- cond_resched();
leaf = path->nodes[0];
slot = path->slots[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
@@ -1689,7 +1689,6 @@ static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
search_start = extent_end;
next:
path->slots[0]++;
- cond_resched();
}
/*
@@ -4756,7 +4755,6 @@ int btrfs_uuid_scan_kthread(void *data)
} else {
break;
}
- cond_resched();
}
out:
@@ -1743,7 +1743,6 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
folio_unlock(folio);
}
folio_batch_release(&fbatch);
- cond_resched();
/* End of range already reached? */
if (index > end || !index)
break;
@@ -299,9 +299,7 @@ static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
fscache_withdraw_cookie(object->cookie);
count++;
if ((count & 63) == 0) {
- spin_unlock(&cache->object_list_lock);
- cond_resched();
- spin_lock(&cache->object_list_lock);
+ cond_resched_lock(&cache->object_list_lock);
}
}
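
cond_resched_lock() folds the unlock/resched/relock dance above into a single
helper. A simplified sketch of its behaviour, for reference; the real
__cond_resched_lock() in kernel/sched/core.c also handles lockdep and the
preempt count, so treat this as an approximation only:

	static int cond_resched_lock_sketch(spinlock_t *lock)
	{
		/*
		 * Drop and retake the lock only if it is contended or a
		 * reschedule is pending.
		 */
		if (spin_needbreak(lock) || need_resched()) {
			spin_unlock(lock);
			if (need_resched())
				schedule();
			else
				cpu_relax();
			spin_lock(lock);
			return 1;
		}
		return 0;
	}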
@@ -353,7 +353,6 @@ int cachefiles_bury_object(struct cachefiles_cache *cache,
unlock_rename(cache->graveyard, dir);
dput(grave);
grave = NULL;
- cond_resched();
goto try_again;
}
@@ -62,7 +62,6 @@ void cachefiles_acquire_volume(struct fscache_volume *vcookie)
cachefiles_bury_object(cache, NULL, cache->store, vdentry,
FSCACHE_VOLUME_IS_WEIRD);
cachefiles_put_directory(volume->dentry);
- cond_resched();
goto retry;
}
}
@@ -1375,7 +1375,6 @@ static int ceph_writepages_start(struct address_space *mapping,
wait_on_page_writeback(page);
}
folio_batch_release(&fbatch);
- cond_resched();
}
}
@@ -986,7 +986,6 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
pfn_mkclean_range(pfn, count, index, vma);
- cond_resched();
}
i_mmap_unlock_read(mapping);
@@ -619,7 +619,6 @@ static void __dentry_kill(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
if (likely(can_free))
dentry_free(dentry);
- cond_resched();
}
static struct dentry *__lock_parent(struct dentry *dentry)
@@ -1629,7 +1628,6 @@ void shrink_dcache_parent(struct dentry *parent)
continue;
}
- cond_resched();
if (!data.found)
break;
data.victim = NULL;
@@ -261,7 +261,6 @@ void dlm_callback_resume(struct dlm_ls *ls)
sum += count;
if (!empty) {
count = 0;
- cond_resched();
goto more;
}
@@ -94,8 +94,6 @@ int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq)
if (error)
goto out_free;
- cond_resched();
-
/*
* pick namelen/name pairs out of received buffer
*/
@@ -1713,7 +1713,6 @@ void dlm_scan_rsbs(struct dlm_ls *ls)
shrink_bucket(ls, i);
if (dlm_locking_stopped(ls))
break;
- cond_resched();
}
}
@@ -5227,7 +5226,6 @@ void dlm_recover_purge(struct dlm_ls *ls)
}
unlock_rsb(r);
unhold_rsb(r);
- cond_resched();
}
up_write(&ls->ls_root_sem);
@@ -5302,7 +5300,6 @@ void dlm_recover_grant(struct dlm_ls *ls)
confirm_master(r, 0);
unlock_rsb(r);
put_rsb(r);
- cond_resched();
}
if (lkb_count)
@@ -562,7 +562,6 @@ int dlm_lowcomms_connect_node(int nodeid)
up_read(&con->sock_lock);
srcu_read_unlock(&connections_srcu, idx);
- cond_resched();
return 0;
}
@@ -1504,7 +1503,6 @@ static void process_recv_sockets(struct work_struct *work)
/* CF_RECV_PENDING cleared */
break;
case DLM_IO_RESCHED:
- cond_resched();
queue_work(io_workqueue, &con->rwork);
/* CF_RECV_PENDING not cleared */
break;
@@ -1650,7 +1648,6 @@ static void process_send_sockets(struct work_struct *work)
break;
case DLM_IO_RESCHED:
/* CF_SEND_PENDING not cleared */
- cond_resched();
queue_work(io_workqueue, &con->swork);
break;
default:
@@ -545,7 +545,6 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq)
else
error = recover_master(r, &count, seq);
unlock_rsb(r);
- cond_resched();
total++;
if (error) {
@@ -41,7 +41,6 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
iput(toput_inode);
toput_inode = inode;
- cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
spin_unlock(&sb->s_inode_list_lock);
@@ -93,7 +93,6 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
} else if (!erofs_workgroup_get(pre)) {
/* try to legitimize the current in-tree one */
xa_unlock(&sbi->managed_pslots);
- cond_resched();
goto repeat;
}
lockref_put_return(&grp->lockref);
@@ -697,8 +697,13 @@ static void z_erofs_cache_invalidate_folio(struct folio *folio,
DBG_BUGON(stop > folio_size(folio) || stop < length);
if (offset == 0 && stop == folio_size(folio))
+		/*
+		 * This is a seemingly tight loop, but preemption can
+		 * still happen inside z_erofs_cache_release_folio()
+		 * when it drops its spinlock via spin_unlock().
+		 */
while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
- cond_resched();
+ ;
}
static const struct address_space_operations z_erofs_cache_aops = {
@@ -1527,7 +1532,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
oldpage, page)) {
erofs_pagepool_add(pagepool, page);
- cond_resched();
goto repeat;
}
out_tocache:
@@ -801,7 +801,6 @@ static void ep_clear_and_put(struct eventpoll *ep)
epi = rb_entry(rbp, struct epitem, rbn);
ep_unregister_pollwait(ep, epi);
- cond_resched();
}
/*
@@ -816,7 +815,6 @@ static void ep_clear_and_put(struct eventpoll *ep)
next = rb_next(rbp);
epi = rb_entry(rbp, struct epitem, rbn);
ep_remove_safe(ep, epi);
- cond_resched();
}
dispose = ep_refcount_dec_and_test(ep);
@@ -1039,7 +1037,6 @@ static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long t
else
toff--;
}
- cond_resched();
}
return NULL;
@@ -451,7 +451,6 @@ static int count(struct user_arg_ptr argv, int max)
if (fatal_signal_pending(current))
return -ERESTARTNOHAND;
- cond_resched();
}
}
return i;
@@ -469,7 +468,6 @@ static int count_strings_kernel(const char *const *argv)
return -E2BIG;
if (fatal_signal_pending(current))
return -ERESTARTNOHAND;
- cond_resched();
}
return i;
}
@@ -562,7 +560,6 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
ret = -ERESTARTNOHAND;
goto out;
}
- cond_resched();
offset = pos % PAGE_SIZE;
if (offset == 0)
@@ -661,7 +658,6 @@ static int copy_strings_kernel(int argc, const char *const *argv,
return ret;
if (fatal_signal_pending(current))
return -ERESTARTNOHAND;
- cond_resched();
}
return 0;
}
@@ -162,7 +162,6 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
return PTR_ERR(inode);
num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
while (i < num) {
- cond_resched();
map.m_lblk = i;
map.m_len = num - i;
n = ext4_map_blocks(NULL, inode, &map, 0);
@@ -224,7 +223,6 @@ int ext4_setup_system_zone(struct super_block *sb)
for (i=0; i < ngroups; i++) {
unsigned int meta_blks = ext4_num_base_meta_blocks(sb, i);
- cond_resched();
if (meta_blks != 0) {
ret = add_system_zone(system_blks,
ext4_group_first_block_no(sb, i),
@@ -174,7 +174,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
err = -ERESTARTSYS;
goto errout;
}
- cond_resched();
offset = ctx->pos & (sb->s_blocksize - 1);
map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
map.m_len = 1;
@@ -3001,7 +3001,4 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
}
-	/* Yield here to deal with large extent trees.
-	 * Should be a no-op if we did IO above. */
- cond_resched();
if (WARN_ON(i + 1 > depth)) {
err = -EFSCORRUPTED;
break;
@@ -1482,7 +1482,6 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
if (!gdp)
continue;
desc_count += ext4_free_inodes_count(sb, gdp);
- cond_resched();
}
return desc_count;
#endif
@@ -2491,7 +2491,6 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
}
}
folio_batch_release(&fbatch);
- cond_resched();
}
mpd->scanned_until_end = 1;
if (handle)
@@ -2843,7 +2843,6 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
int ret = 0;
- cond_resched();
if (new_cr != cr) {
cr = new_cr;
goto repeat;
@@ -3387,7 +3386,6 @@ static int ext4_mb_init_backend(struct super_block *sb)
sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
for (i = 0; i < ngroups; i++) {
- cond_resched();
desc = ext4_get_group_desc(sb, i, NULL);
if (desc == NULL) {
ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
@@ -3746,7 +3744,6 @@ int ext4_mb_release(struct super_block *sb)
if (sbi->s_group_info) {
for (i = 0; i < ngroups; i++) {
- cond_resched();
grinfo = ext4_get_group_info(sb, i);
if (!grinfo)
continue;
@@ -6034,7 +6031,6 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
freed += ret;
needed -= ret;
- cond_resched();
}
if (needed > 0 && busy && ++retry < 3) {
@@ -6173,8 +6169,6 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
while (ar->len &&
ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
- /* let others to free the space */
- cond_resched();
ar->len = ar->len >> 1;
}
if (!ar->len) {
@@ -6720,7 +6714,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
for (i = 0; i < count; i++) {
- cond_resched();
if (is_metadata)
bh = sb_find_get_block(inode->i_sb, block + i);
ext4_forget(handle, is_metadata, inode, bh, block + i);
@@ -6959,8 +6952,11 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
return count;
if (need_resched()) {
+ /*
+		 * Rescheduling can happen implicitly when the group
+		 * lock is dropped below.
+ */
ext4_unlock_group(sb, e4b->bd_group);
- cond_resched();
ext4_lock_group(sb, e4b->bd_group);
}
@@ -1255,7 +1255,6 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
err = -ERESTARTSYS;
goto errout;
}
- cond_resched();
block = dx_get_block(frame->at);
ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
start_hash, start_minor_hash);
@@ -1341,7 +1340,6 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
map_tail->size = ext4_rec_len_from_disk(de->rec_len,
blocksize);
count++;
- cond_resched();
}
de = ext4_next_entry(de, blocksize);
}
@@ -1658,7 +1656,6 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
/*
* We deal with the read-ahead logic here.
*/
- cond_resched();
if (ra_ptr >= ra_max) {
/* Refill the readahead buffer */
ra_ptr = 0;
@@ -67,7 +67,6 @@ static int ext4_orphan_file_add(handle_t *handle, struct inode *inode)
atomic_inc(&oi->of_binfo[i].ob_free_entries);
return -ENOSPC;
}
- cond_resched();
}
while (bdata[j]) {
if (++j >= inodes_per_ob) {
@@ -3861,7 +3861,6 @@ static int ext4_lazyinit_thread(void *arg)
cur = jiffies;
if ((time_after_eq(cur, next_wakeup)) ||
(MAX_JIFFY_OFFSET == next_wakeup)) {
- cond_resched();
continue;
}
@@ -4226,7 +4225,6 @@ int ext4_calculate_overhead(struct super_block *sb)
overhead += blks;
if (blks)
memset(buf, 0, PAGE_SIZE);
- cond_resched();
}
/*
@@ -45,7 +45,6 @@ struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
repeat:
page = f2fs_grab_cache_page(mapping, index, false);
if (!page) {
- cond_resched();
goto repeat;
}
f2fs_wait_on_page_writeback(page, META, true, true);
@@ -76,7 +75,6 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
repeat:
page = f2fs_grab_cache_page(mapping, index, false);
if (!page) {
- cond_resched();
goto repeat;
}
if (PageUptodate(page))
@@ -463,7 +461,6 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
break;
}
folio_batch_release(&fbatch);
- cond_resched();
}
stop:
if (nwritten)
@@ -1111,9 +1108,13 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
F2FS_I(inode)->cp_task = NULL;
iput(inode);
- /* We need to give cpu to another writers. */
+ /*
+	 * We need to give the CPU to other writers, but
+	 * cond_resched_stall() does not guarantee that. Perhaps we
+	 * should explicitly wait on an event or a timeout?
+ */
if (ino == cur_ino)
- cond_resched();
+ cond_resched_stall();
else
ino = cur_ino;
} else {
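
On the open question in that comment, an explicit timed back-off is easy to
sketch. The following is hypothetical and untested, reusing the names already
in the loop; the 10ms period is arbitrary:

	if (ino == cur_ino)
		/* Hypothetical: sleep briefly instead of spinning on one inode. */
		schedule_timeout_interruptible(msecs_to_jiffies(10));
	else
		ino = cur_ino;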
@@ -1122,7 +1123,6 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
* writebacking dentry pages in the freeing inode.
*/
f2fs_submit_merged_write(sbi, DATA);
- cond_resched();
}
goto retry;
}
@@ -1229,7 +1229,6 @@ static int block_operations(struct f2fs_sb_info *sbi)
f2fs_quota_sync(sbi->sb, -1);
if (locked)
up_read(&sbi->sb->s_umount);
- cond_resched();
goto retry_flush_quotas;
}
@@ -1240,7 +1239,6 @@ static int block_operations(struct f2fs_sb_info *sbi)
err = f2fs_sync_dirty_inodes(sbi, DIR_INODE, true);
if (err)
return err;
- cond_resched();
goto retry_flush_quotas;
}
@@ -1256,7 +1254,6 @@ static int block_operations(struct f2fs_sb_info *sbi)
err = f2fs_sync_inode_meta(sbi);
if (err)
return err;
- cond_resched();
goto retry_flush_quotas;
}
@@ -1273,7 +1270,6 @@ static int block_operations(struct f2fs_sb_info *sbi)
f2fs_unlock_all(sbi);
return err;
}
- cond_resched();
goto retry_flush_nodes;
}
@@ -1941,7 +1941,6 @@ void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
folio_unlock(folio);
}
folio_batch_release(&fbatch);
- cond_resched();
} while (index < end);
}
@@ -2105,7 +2105,6 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
prep_next:
- cond_resched();
if (fatal_signal_pending(current))
ret = -EINTR;
else
@@ -3250,7 +3249,6 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
goto readd;
}
release_pages(pages, nr_pages);
- cond_resched();
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
/* flush remained pages in compress cluster */
@@ -3981,7 +3979,6 @@ static int check_swap_activate(struct swap_info_struct *sis,
while (cur_lblock < last_lblock && cur_lblock < sis->max) {
struct f2fs_map_blocks map;
retry:
- cond_resched();
memset(&map, 0, sizeof(map));
map.m_lblk = cur_lblock;
@@ -1090,7 +1090,6 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
err = -ERESTARTSYS;
goto out_free;
}
- cond_resched();
/* readahead for multi pages of dir */
if (npages - n > 1 && !ra_has_index(ra, n))
@@ -936,7 +936,6 @@ static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink
if (node_cnt + tree_cnt >= nr_shrink)
goto unlock_out;
- cond_resched();
}
mutex_unlock(&eti->extent_tree_lock);
@@ -2849,8 +2849,12 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
unsigned long index, void *item)
{
+ /*
+	 * Retry the insert in a tight loop; the scheduler
+	 * will preempt us when necessary.
+ */
while (radix_tree_insert(root, index, item))
- cond_resched();
+ ;
}
#define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
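
radix_tree_insert() fails here only with -EEXIST or -ENOMEM, so the loop above
can spin on allocation failure. A preload-based alternative would rule the
-ENOMEM case out up front; this is a sketch only, as f2fs may have reasons to
prefer the plain retry:

	/* Guarantee the node allocation before attempting the insert. */
	if (radix_tree_preload(GFP_NOFS) == 0) {
		/* -ENOMEM is now impossible; -EEXIST is still possible. */
		radix_tree_insert(root, index, item);
		radix_tree_preload_end();
	}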
@@ -3922,7 +3922,6 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
ret = -EINTR;
goto out;
}
- cond_resched();
}
if (len)
@@ -4110,7 +4109,6 @@ static int f2fs_ioc_decompress_file(struct file *filp)
count -= cluster_size;
page_idx += cluster_size;
- cond_resched();
if (fatal_signal_pending(current)) {
ret = -EINTR;
break;
@@ -4188,7 +4186,6 @@ static int f2fs_ioc_compress_file(struct file *filp)
count -= cluster_size;
page_idx += cluster_size;
- cond_resched();
if (fatal_signal_pending(current)) {
ret = -EINTR;
break;
@@ -1579,7 +1579,6 @@ static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
unlock_page(page);
}
folio_batch_release(&fbatch);
- cond_resched();
}
return last_page;
}
@@ -1841,7 +1840,6 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
}
}
folio_batch_release(&fbatch);
- cond_resched();
if (ret || marked)
break;
@@ -1944,7 +1942,6 @@ void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
unlock_page(page);
}
folio_batch_release(&fbatch);
- cond_resched();
}
}
@@ -2046,7 +2043,6 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
break;
}
folio_batch_release(&fbatch);
- cond_resched();
if (wbc->nr_to_write == 0) {
step = 2;
@@ -2705,7 +2705,6 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
towrite -= tocopy;
off += tocopy;
data += tocopy;
- cond_resched();
}
if (len == towrite)
@@ -741,7 +741,6 @@ int fat_count_free_clusters(struct super_block *sb)
if (ops->ent_get(&fatent) == FAT_ENT_FREE)
free++;
} while (fat_ent_next(sbi, &fatent));
- cond_resched();
}
sbi->free_clusters = free;
sbi->free_clus_valid = 1;
@@ -822,7 +821,6 @@ int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
if (need_resched()) {
fatent_brelse(&fatent);
unlock_fat(sbi);
- cond_resched();
lock_fat(sbi);
}
}
@@ -428,10 +428,8 @@ static struct fdtable *close_files(struct files_struct * files)
while (set) {
if (set & 1) {
struct file * file = xchg(&fdt->fd[i], NULL);
- if (file) {
+ if (file)
filp_close(file, files);
- cond_resched();
- }
}
i++;
set >>= 1;
@@ -708,11 +706,9 @@ static inline void __range_close(struct files_struct *files, unsigned int fd,
if (file) {
spin_unlock(&files->file_lock);
filp_close(file, files);
- cond_resched();
spin_lock(&files->file_lock);
} else if (need_resched()) {
spin_unlock(&files->file_lock);
- cond_resched();
spin_lock(&files->file_lock);
}
}
@@ -845,7 +841,6 @@ void do_close_on_exec(struct files_struct *files)
__put_unused_fd(files, fd);
spin_unlock(&files->file_lock);
filp_close(file, files);
- cond_resched();
spin_lock(&files->file_lock);
}
@@ -1914,7 +1914,6 @@ static long writeback_sb_inodes(struct super_block *sb,
* give up the CPU.
*/
blk_flush_plug(current->plug, false);
- cond_resched();
}
/*
@@ -2621,8 +2620,6 @@ static void wait_sb_inodes(struct super_block *sb)
*/
filemap_fdatawait_keep_errors(mapping);
- cond_resched();
-
iput(inode);
rcu_read_lock();
@@ -357,7 +357,6 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
if (ret > 0)
ret = 0;
folio_batch_release(&fbatch);
- cond_resched();
}
if (!cycled && !done) {
@@ -1592,7 +1592,6 @@ static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
buf_in_tr = false;
}
gfs2_glock_dq_uninit(rd_gh);
- cond_resched();
goto more_rgrps;
}
out:
@@ -1962,7 +1961,6 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
if (current->journal_info) {
up_write(&ip->i_rw_mutex);
gfs2_trans_end(sdp);
- cond_resched();
}
gfs2_quota_unhold(ip);
out_metapath:
@@ -2073,7 +2073,7 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
}
rhashtable_walk_stop(&iter);
- } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
+ } while (gl == ERR_PTR(-EAGAIN));
rhashtable_walk_exit(&iter);
}
@@ -143,7 +143,6 @@ __acquires(&sdp->sd_ail_lock)
ret = write_cache_pages(mapping, wbc, __gfs2_writepage, mapping);
if (need_resched()) {
blk_finish_plug(plug);
- cond_resched();
blk_start_plug(plug);
}
spin_lock(&sdp->sd_ail_lock);
@@ -1774,7 +1774,6 @@ static void gfs2_evict_inodes(struct super_block *sb)
iput(toput_inode);
toput_inode = inode;
- cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
spin_unlock(&sb->s_inode_list_lock);
@@ -77,8 +77,6 @@ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head
hpfs_prefetch_sectors(s, secno, ahead);
- cond_resched();
-
*bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
if (bh != NULL)
return bh->b_data;
@@ -97,8 +95,6 @@ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head
hpfs_lock_assert(s);
- cond_resched();
-
if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
if (!buffer_uptodate(bh)) wait_on_buffer(bh);
set_buffer_uptodate(bh);
@@ -118,8 +114,6 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe
hpfs_lock_assert(s);
- cond_resched();
-
if (secno & 3) {
pr_err("%s(): unaligned read\n", __func__);
return NULL;
@@ -168,8 +162,6 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe
void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
struct quad_buffer_head *qbh)
{
- cond_resched();
-
hpfs_lock_assert(s);
if (secno & 3) {
@@ -689,7 +689,6 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
folio_batch_release(&fbatch);
- cond_resched();
}
if (truncate_op)
@@ -867,8 +866,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
struct folio *folio;
unsigned long addr;
- cond_resched();
-
/*
* fallocate(2) manpage permits EINTR; we may have been
* interrupted because we are using up too much memory.
@@ -695,7 +695,6 @@ static void dispose_list(struct list_head *head)
list_del_init(&inode->i_lru);
evict(inode);
- cond_resched();
}
}
@@ -737,7 +736,6 @@ void evict_inodes(struct super_block *sb)
*/
if (need_resched()) {
spin_unlock(&sb->s_inode_list_lock);
- cond_resched();
dispose_list(&dispose);
goto again;
}
@@ -778,7 +776,6 @@ void invalidate_inodes(struct super_block *sb)
list_add(&inode->i_lru, &dispose);
if (need_resched()) {
spin_unlock(&sb->s_inode_list_lock);
- cond_resched();
dispose_list(&dispose);
goto again;
}
@@ -927,7 +927,6 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
if (unlikely(copied != status))
iov_iter_revert(i, copied - status);
- cond_resched();
if (unlikely(status == 0)) {
/*
* A short copy made iomap_write_end() reject the
@@ -1296,8 +1295,6 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
if (WARN_ON_ONCE(bytes == 0))
return -EIO;
- cond_resched();
-
pos += bytes;
written += bytes;
length -= bytes;
@@ -1533,10 +1530,8 @@ iomap_finish_ioends(struct iomap_ioend *ioend, int error)
completions = iomap_finish_ioend(ioend, error);
while (!list_empty(&tmp)) {
- if (completions > IOEND_BATCH_SIZE * 8) {
- cond_resched();
+ if (completions > IOEND_BATCH_SIZE * 8)
completions = 0;
- }
ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
list_del_init(&ioend->io_list);
completions += iomap_finish_ioend(ioend, error);
@@ -457,7 +457,6 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
}
spin_unlock(&journal->j_list_lock);
- cond_resched();
if (*nr_to_scan && next_tid)
goto again;
@@ -529,7 +528,6 @@ void jbd2_journal_destroy_checkpoint(journal_t *journal)
}
__jbd2_journal_clean_checkpoint_list(journal, true);
spin_unlock(&journal->j_list_lock);
- cond_resched();
}
}
@@ -729,7 +729,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
bh->b_end_io = journal_end_buffer_io_sync;
submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
}
- cond_resched();
/* Force a new descriptor to be generated next
time round the loop. */
@@ -811,7 +810,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
b_assoc_buffers);
wait_on_buffer(bh);
- cond_resched();
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
@@ -854,7 +852,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
wait_on_buffer(bh);
- cond_resched();
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
@@ -509,8 +509,6 @@ static int do_one_pass(journal_t *journal,
struct buffer_head * obh;
struct buffer_head * nbh;
- cond_resched();
-
/* If we already know where to stop the log traversal,
* check right now that we haven't gone past the end of
* the log. */
@@ -121,10 +121,8 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
c->flags |= JFFS2_SB_FLAG_BUILDING;
/* Now scan the directory tree, increasing nlink according to every dirent found. */
for_each_inode(i, c, ic) {
- if (ic->scan_dents) {
+ if (ic->scan_dents)
jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
- cond_resched();
- }
}
dbg_fsbuild("pass 1 complete\n");
@@ -141,7 +139,6 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
continue;
jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
- cond_resched();
}
dbg_fsbuild("pass 2a starting\n");
@@ -209,7 +206,6 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
jffs2_free_full_dirent(fd);
}
ic->scan_dents = NULL;
- cond_resched();
}
ret = jffs2_build_xattr_subsystem(c);
if (ret)
@@ -143,8 +143,6 @@ int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
BUG();
}
- /* Be nice */
- cond_resched();
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
}
@@ -387,7 +385,6 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
}
}
ofs += readlen;
- cond_resched();
}
ret = 0;
fail:
@@ -923,8 +923,6 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {
- cond_resched();
-
/* We only care about obsolete ones */
if (!(ref_obsolete(raw)))
continue;
@@ -578,7 +578,6 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
}
jffs2_free_node_frag(frag);
- cond_resched();
}
}
@@ -185,8 +185,6 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
} else if (ret)
return ret;
- cond_resched();
-
if (signal_pending(current))
return -EINTR;
@@ -227,7 +225,14 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
spin_unlock(&c->erase_completion_lock);
if (ret == -EAGAIN)
- cond_resched();
+ /*
+			 * The spin_unlock() above will implicitly reschedule
+			 * if a reschedule is needed.
+			 *
+			 * If we did not reschedule, take a breather here
+			 * before retrying.
+ */
+ cpu_relax();
else
break;
}
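
Unlike cond_resched(), cpu_relax() is never a scheduling point; it is a
CPU-level hint for spin loops. On x86 it boils down to the PAUSE instruction,
roughly (arch/x86/include/asm/processor.h):

	static __always_inline void cpu_relax(void)
	{
		/* PAUSE: relax the pipeline and be polite to SMT siblings. */
		asm volatile("rep; nop" ::: "memory");
	}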
@@ -1013,8 +1013,6 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf
valid_ref = jffs2_first_valid_node(ref->next_in_ino);
spin_unlock(&c->erase_completion_lock);
- cond_resched();
-
/*
* At this point we don't know the type of the node we're going
* to read, so we do not know the size of its header. In order
@@ -143,8 +143,6 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
for (i=0; i<c->nr_blocks; i++) {
struct jffs2_eraseblock *jeb = &c->blocks[i];
- cond_resched();
-
/* reset summary info for next eraseblock scan */
jffs2_sum_reset_collected(s);
@@ -621,8 +619,6 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
if (err)
return err;
- cond_resched();
-
if (ofs & 3) {
pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs);
ofs = PAD(ofs);
@@ -397,8 +397,6 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
for (i=0; i<je32_to_cpu(summary->sum_num); i++) {
dbg_summary("processing summary index %d\n", i);
- cond_resched();
-
/* Make sure there's a spare ref for dirty space */
err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
if (err)
@@ -2833,12 +2833,11 @@ void txQuiesce(struct super_block *sb)
mutex_lock(&jfs_ip->commit_mutex);
txCommit(tid, 1, &ip, 0);
txEnd(tid);
+ /*
+			 * The mutex_unlock() below reschedules if needed.
+ */
mutex_unlock(&jfs_ip->commit_mutex);
- /*
- * Just to be safe. I don't know how
- * long we can run without blocking
- */
- cond_resched();
+
TXN_LOCK();
}
@@ -2912,11 +2911,6 @@ int jfs_sync(void *arg)
mutex_unlock(&jfs_ip->commit_mutex);
iput(ip);
- /*
- * Just to be safe. I don't know how
- * long we can run without blocking
- */
- cond_resched();
TXN_LOCK();
} else {
/* We can't get the commit mutex. It may
@@ -125,9 +125,7 @@ static struct dentry *scan_positives(struct dentry *cursor,
if (need_resched()) {
list_move(&cursor->d_child, p);
p = &cursor->d_child;
- spin_unlock(&dentry->d_lock);
- cond_resched();
- spin_lock(&dentry->d_lock);
+			cond_resched_lock(&dentry->d_lock);
}
}
spin_unlock(&dentry->d_lock);
@@ -322,7 +322,6 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
spin_unlock(&cache->c_list_lock);
__mb_cache_entry_free(cache, entry);
shrunk++;
- cond_resched();
spin_lock(&cache->c_list_lock);
}
spin_unlock(&cache->c_list_lock);
@@ -1781,7 +1781,6 @@ static const char *pick_link(struct nameidata *nd, struct path *link,
if (!(nd->flags & LOOKUP_RCU)) {
touch_atime(&last->link);
- cond_resched();
} else if (atime_needs_update(&last->link, inode)) {
if (!try_to_unlazy(nd))
return ERR_PTR(-ECHILD);
@@ -641,7 +641,6 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
netfs_rreq_assess(rreq, false);
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
break;
- cond_resched();
}
ret = rreq->error;
@@ -650,7 +650,6 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
err = nfs_end_delegation_return(inode, delegation, 0);
iput(inode);
- cond_resched();
if (!err)
goto restart;
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
@@ -1186,7 +1185,6 @@ static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
nfs_put_delegation(delegation);
}
iput(inode);
- cond_resched();
goto restart;
}
rcu_read_unlock();
@@ -1318,7 +1316,6 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
put_cred(cred);
if (!nfs4_server_rebooted(server->nfs_client)) {
iput(inode);
- cond_resched();
goto restart;
}
nfs_inode_mark_test_expired_delegation(server,inode);
@@ -2665,14 +2665,12 @@ static int pnfs_layout_return_unused_byserver(struct nfs_server *server,
spin_unlock(&inode->i_lock);
rcu_read_unlock();
pnfs_put_layout_hdr(lo);
- cond_resched();
goto restart;
}
spin_unlock(&inode->i_lock);
rcu_read_unlock();
pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
pnfs_put_layout_hdr(lo);
- cond_resched();
goto restart;
}
rcu_read_unlock();
@@ -1053,7 +1053,6 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
ret++;
if ((ret == max) && !cinfo->dreq)
break;
- cond_resched();
}
return ret;
}
@@ -1890,8 +1889,6 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
atomic_long_inc(&NFS_I(data->inode)->redirtied_pages);
next:
nfs_unlock_and_release_request(req);
- /* Latency breaker */
- cond_resched();
}
nfss = NFS_SERVER(data->inode);
if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
@@ -1958,7 +1955,6 @@ static int __nfs_commit_inode(struct inode *inode, int how,
}
if (nscan < INT_MAX)
break;
- cond_resched();
}
nfs_commit_end(cinfo.mds);
if (ret || !may_wait)
@@ -2173,7 +2173,6 @@ static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
} while ((bh = bh->b_this_page) != head);
}
folio_batch_release(&fbatch);
- cond_resched();
}
for (level = NILFS_BTREE_LEVEL_NODE_MIN;
@@ -1280,7 +1280,6 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
blkoff += n;
}
- cond_resched();
} while (true);
/* If ret is 1 then we just hit the end of the extent array */
@@ -277,7 +277,6 @@ int nilfs_copy_dirty_pages(struct address_space *dmap,
folio_unlock(folio);
}
folio_batch_release(&fbatch);
- cond_resched();
if (likely(!err))
goto repeat;
@@ -346,7 +345,6 @@ void nilfs_copy_back_pages(struct address_space *dmap,
folio_unlock(folio);
}
folio_batch_release(&fbatch);
- cond_resched();
goto repeat;
}
@@ -382,7 +380,6 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
folio_unlock(folio);
}
folio_batch_release(&fbatch);
- cond_resched();
}
}
@@ -539,7 +536,6 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
} while (++i < nr_folios);
folio_batch_release(&fbatch);
- cond_resched();
goto repeat;
out_locked:
@@ -361,7 +361,6 @@ static void nilfs_transaction_lock(struct super_block *sb,
nilfs_segctor_do_immediate_flush(sci);
up_write(&nilfs->ns_segctor_sem);
- cond_resched();
}
if (gcflag)
ti->ti_flags |= NILFS_TI_GC;
@@ -746,13 +745,11 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
ndirties++;
if (unlikely(ndirties >= nlimit)) {
folio_batch_release(&fbatch);
- cond_resched();
return ndirties;
}
} while (bh = bh->b_this_page, bh != head);
}
folio_batch_release(&fbatch);
- cond_resched();
goto repeat;
}
@@ -785,7 +782,6 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
} while (bh != head);
}
folio_batch_release(&fbatch);
- cond_resched();
}
}
@@ -805,7 +805,6 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
* User can supply arbitrarily large buffer. Avoid softlockups
* in case there are lots of available events.
*/
- cond_resched();
event = get_one_event(group, count);
if (IS_ERR(event)) {
ret = PTR_ERR(event);
@@ -79,7 +79,6 @@ static void fsnotify_unmount_inodes(struct super_block *sb)
iput_inode = inode;
- cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
spin_unlock(&sb->s_inode_list_lock);
@@ -2556,7 +2556,6 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
set_page_dirty(page);
put_page(page);
balance_dirty_pages_ratelimited(mapping);
- cond_resched();
if (idx == end)
goto done;
idx++;
@@ -2597,7 +2596,6 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
unlock_page(page);
put_page(page);
balance_dirty_pages_ratelimited(mapping);
- cond_resched();
}
/* If there is a last partial page, need to do it the slow way. */
if (end_ofs) {
@@ -2614,7 +2612,6 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
set_page_dirty(page);
put_page(page);
balance_dirty_pages_ratelimited(mapping);
- cond_resched();
}
done:
ntfs_debug("Done.");
@@ -259,7 +259,6 @@ static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
* files.
*/
balance_dirty_pages_ratelimited(mapping);
- cond_resched();
} while (++index < end_index);
read_lock_irqsave(&ni->size_lock, flags);
BUG_ON(ni->initialized_size != new_init_size);
@@ -1868,7 +1867,6 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
iov_iter_revert(i, copied);
break;
}
- cond_resched();
if (unlikely(copied < bytes)) {
iov_iter_revert(i, copied);
if (copied)
@@ -158,7 +158,6 @@ static int ntfs_extend_initialized_size(struct file *file,
break;
balance_dirty_pages_ratelimited(mapping);
- cond_resched();
}
return 0;
@@ -241,7 +240,6 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
unlock_page(page);
put_page(page);
- cond_resched();
}
out:
mark_inode_dirty(inode);
@@ -1005,13 +1003,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
if (err)
goto out;
- /*
- * We can loop for a long time in here. Be nice and allow
- * us to schedule out to avoid softlocking if preempt
- * is disabled.
- */
- cond_resched();
-
pos += copied;
written += copied;
@@ -2265,8 +2265,6 @@ int ni_decompress_file(struct ntfs_inode *ni)
if (err)
goto out;
-
- cond_resched();
}
remove_wof:
@@ -7637,10 +7637,8 @@ int ocfs2_trim_mainbm(struct super_block *sb, struct fstrim_range *range)
* main_bm related locks for avoiding the current IO starve, then go to
* trim the next group
*/
- if (ret >= 0 && group <= last_group) {
- cond_resched();
+ if (ret >= 0 && group <= last_group)
goto next_group;
- }
out:
range->len = trimmed * sb->s_blocksize;
return ret;
@@ -951,7 +951,12 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
if (ret == (ssize_t)-EAGAIN) {
mlog(0, "sendpage of size %zu to " SC_NODEF_FMT
" returned EAGAIN\n", size, SC_NODEF_ARGS(sc));
- cond_resched();
+
+ /*
+			 * Take a breather before retrying, though perhaps this
+			 * should wait on an event or a timeout instead?
+ */
+ cpu_relax();
continue;
}
mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
@@ -1929,7 +1934,6 @@ static void o2net_accept_many(struct work_struct *work)
o2net_accept_one(sock, &more);
if (!more)
break;
- cond_resched();
}
}
@@ -792,11 +792,12 @@ static int dlm_thread(void *data)
spin_unlock(&dlm->spinlock);
dlm_flush_asts(dlm);
- /* yield and continue right away if there is more work to do */
- if (!n) {
- cond_resched();
+		/*
+		 * The unlock above would have yielded if a resched was
+		 * needed. Continue right away if there is more to do.
+		 */
+ if (!n)
continue;
- }
wait_event_interruptible_timeout(dlm->dlm_thread_wq,
!dlm_dirty_list_empty(dlm) ||
@@ -940,6 +940,10 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
BUG_ON(range_start >= range_end);
while (zero_pos < range_end) {
+ /*
+		 * If this is a very long extent, we might be here for a
+		 * while; expect the scheduler to preempt us as needed.
+ */
next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
if (next_pos > range_end)
next_pos = range_end;
@@ -949,12 +953,6 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
break;
}
zero_pos = next_pos;
-
- /*
- * Very large extends have the potential to lock up
- * the cpu for extended periods of time.
- */
- cond_resched();
}
return rc;
@@ -3532,7 +3532,6 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
char name[10 + 1];
unsigned int len;
- cond_resched();
if (!has_pid_permissions(fs_info, iter.task, HIDEPID_INVISIBLE))
continue;
@@ -272,7 +272,6 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
name, len, instantiate, p,
&data))
goto out;
- cond_resched();
rcu_read_lock();
}
rcu_read_unlock();
@@ -491,7 +491,6 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) {
page_offline_thaw();
- cond_resched();
page_offline_freeze();
}
@@ -80,8 +80,6 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
pfn++;
out++;
count -= KPMSIZE;
-
- cond_resched();
}
*ppos += (char __user *)out - buf;
@@ -258,8 +256,6 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
pfn++;
out++;
count -= KPMSIZE;
-
- cond_resched();
}
*ppos += (char __user *)out - buf;
@@ -313,8 +309,6 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
pfn++;
out++;
count -= KPMSIZE;
-
- cond_resched();
}
*ppos += (char __user *)out - buf;
@@ -629,7 +629,6 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
smaps_pte_entry(pte, addr, walk);
pte_unmap_unlock(pte - 1, ptl);
out:
- cond_resched();
return 0;
}
@@ -1210,7 +1209,6 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
ClearPageReferenced(page);
}
pte_unmap_unlock(pte - 1, ptl);
- cond_resched();
return 0;
}
@@ -1554,8 +1552,6 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
}
pte_unmap_unlock(orig_pte, ptl);
- cond_resched();
-
return err;
}
@@ -1605,8 +1601,6 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
frame++;
}
- cond_resched();
-
return err;
}
#else
@@ -1899,7 +1893,6 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap_unlock(orig_pte, ptl);
- cond_resched();
return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
@@ -1068,7 +1068,6 @@ static int add_dquot_ref(struct super_block *sb, int type)
* later.
*/
old_inode = inode;
- cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
spin_unlock(&sb->s_inode_list_lock);
@@ -814,7 +814,6 @@ static int write_ordered_buffers(spinlock_t * lock,
if (chunk.nr)
write_ordered_chunk(&chunk);
wait_on_buffer(bh);
- cond_resched();
spin_lock(lock);
goto loop_next;
}
@@ -1671,7 +1670,6 @@ static int write_one_transaction(struct super_block *s,
}
next:
cn = cn->next;
- cond_resched();
}
return ret;
}
@@ -573,7 +573,6 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
*routp = res_out;
if (res_ex)
*rexp = res_ex;
- cond_resched();
}
wait->_qproc = NULL;
if (retval || timed_out || signal_pending(current))
@@ -2713,7 +2713,6 @@ static void cifs_extend_writeback(struct address_space *mapping,
}
folio_batch_release(&batch);
- cond_resched();
} while (!stop);
*_len = len;
@@ -2951,7 +2950,6 @@ static int cifs_writepages_region(struct address_space *mapping,
}
folio_batch_release(&fbatch);
- cond_resched();
} while (wbc->nr_to_write > 0);
*_next = start;
@@ -604,7 +604,6 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
splice_from_pipe_begin(sd);
do {
- cond_resched();
ret = splice_from_pipe_next(pipe, sd);
if (ret > 0)
ret = splice_from_pipe_feed(pipe, sd, actor);
@@ -477,7 +477,6 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
}
err = make_free_space(c);
- cond_resched();
if (err == -EAGAIN) {
dbg_budg("try again");
goto again;
@@ -309,7 +309,6 @@ int ubifs_bg_thread(void *info)
ubifs_ro_mode(c, err);
run_bg_commit(c);
- cond_resched();
}
ubifs_msg(c, "background thread \"%s\" stops", c->bgt_name);
@@ -852,7 +852,6 @@ void ubifs_dump_leb(const struct ubifs_info *c, int lnum)
sleb->nodes_cnt, sleb->endpt);
list_for_each_entry(snod, &sleb->nodes, list) {
- cond_resched();
pr_err("Dumping node at LEB %d:%d len %d\n", lnum,
snod->offs, snod->len);
ubifs_dump_node(c, snod->node, c->leb_size - snod->offs);
@@ -1622,8 +1621,6 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
while (1) {
int idx;
- cond_resched();
-
if (znode_cb) {
err = znode_cb(c, znode, priv);
if (err) {
@@ -2329,7 +2326,6 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
ino_t inuma, inumb;
uint32_t blka, blkb;
- cond_resched();
sa = container_of(cur, struct ubifs_scan_node, list);
sb = container_of(cur->next, struct ubifs_scan_node, list);
@@ -2396,7 +2392,6 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
ino_t inuma, inumb;
uint32_t hasha, hashb;
- cond_resched();
sa = container_of(cur, struct ubifs_scan_node, list);
sb = container_of(cur->next, struct ubifs_scan_node, list);
@@ -683,7 +683,6 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
kfree(file->private_data);
ctx->pos = key_hash_flash(c, &dent->key);
file->private_data = dent;
- cond_resched();
}
out:
@@ -109,7 +109,6 @@ static int data_nodes_cmp(void *priv, const struct list_head *a,
struct ubifs_info *c = priv;
struct ubifs_scan_node *sa, *sb;
- cond_resched();
if (a == b)
return 0;
@@ -153,7 +152,6 @@ static int nondata_nodes_cmp(void *priv, const struct list_head *a,
struct ubifs_info *c = priv;
struct ubifs_scan_node *sa, *sb;
- cond_resched();
if (a == b)
return 0;
@@ -305,7 +303,6 @@ static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
{
int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;
- cond_resched();
err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
if (err)
return err;
@@ -695,8 +692,6 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
/* Maybe continue after find and break before find */
lp.lnum = -1;

- cond_resched();
-
/* Give the commit an opportunity to run */
if (ubifs_gc_should_commit(c)) {
ret = -EAGAIN;
@@ -683,8 +683,6 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c)
for (i = 0; i < c->jhead_cnt; i++) {
struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

- cond_resched();
-
/*
* If the mutex is locked then wbuf is being changed, so
* synchronization is not necessary.
@@ -1113,8 +1113,6 @@ static int scan_check_cb(struct ubifs_info *c,
list_for_each_entry(snod, &sleb->nodes, list) {
int found, level = 0;

- cond_resched();
-
if (is_idx == -1)
is_idx = (snod->type == UBIFS_IDX_NODE) ? 1 : 0;

@@ -1483,7 +1483,6 @@ static int dbg_is_nnode_dirty(struct ubifs_info *c, int lnum, int offs)
for (; nnode; nnode = next_nnode(c, nnode, &hght)) {
struct ubifs_nbranch *branch;

- cond_resched();
if (nnode->parent) {
branch = &nnode->parent->nbranch[nnode->iip];
if (branch->lnum != lnum || branch->offs != offs)
@@ -1517,7 +1516,6 @@ static int dbg_is_pnode_dirty(struct ubifs_info *c, int lnum, int offs)
struct ubifs_pnode *pnode;
struct ubifs_nbranch *branch;

- cond_resched();
pnode = ubifs_pnode_lookup(c, i);
if (IS_ERR(pnode))
return PTR_ERR(pnode);
@@ -1673,7 +1671,6 @@ int dbg_check_ltab(struct ubifs_info *c)
pnode = ubifs_pnode_lookup(c, i);
if (IS_ERR(pnode))
return PTR_ERR(pnode);
- cond_resched();
}

/* Check nodes */
@@ -957,7 +957,6 @@ static int dbg_read_orphans(struct check_info *ci, struct ubifs_scan_leb *sleb)
int i, n, err;

list_for_each_entry(snod, &sleb->nodes, list) {
- cond_resched();
if (snod->type != UBIFS_ORPH_NODE)
continue;
orph = snod->node;
@@ -638,8 +638,6 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
dbg_scan("look at LEB %d:%d (%d bytes left)",
lnum, offs, len);

- cond_resched();
-
/*
* Scan quietly until there is an error from which we cannot
* recover
@@ -999,8 +997,6 @@ static int clean_an_unclean_leb(struct ubifs_info *c,
while (len >= 8) {
int ret;

- cond_resched();
-
/* Scan quietly until there is an error */
ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);

@@ -305,7 +305,6 @@ static int replay_entries_cmp(void *priv, const struct list_head *a,
struct ubifs_info *c = priv;
struct replay_entry *ra, *rb;

- cond_resched();
if (a == b)
return 0;

@@ -332,8 +331,6 @@ static int apply_replay_list(struct ubifs_info *c)
list_sort(c, &c->replay_list, &replay_entries_cmp);

list_for_each_entry(r, &c->replay_list, list) {
- cond_resched();
-
err = apply_replay_entry(c, r);
if (err)
return err;
@@ -722,8 +719,6 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
u8 hash[UBIFS_HASH_ARR_SZ];
int deletion = 0;

- cond_resched();
-
if (snod->sqnum >= SQNUM_WATERMARK) {
ubifs_err(c, "file system's life ended");
goto out_dump;
@@ -1060,8 +1055,6 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
}

list_for_each_entry(snod, &sleb->nodes, list) {
- cond_resched();
-
if (snod->sqnum >= SQNUM_WATERMARK) {
ubifs_err(c, "file system's life ended");
goto out_dump;
@@ -269,8 +269,6 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
dbg_scan("look at LEB %d:%d (%d bytes left)",
lnum, offs, len);

- cond_resched();
-
ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);
if (ret > 0) {
/* Padding bytes or a valid padding node */
@@ -125,7 +125,6 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)

zprev = znode;
znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode);
- cond_resched();
}

return total_freed;
@@ -949,8 +949,6 @@ static int check_volume_empty(struct ubifs_info *c)
c->empty = 0;
break;
}
-
- cond_resched();
}

return 0;
@@ -857,8 +857,6 @@ static int write_index(struct ubifs_info *c)
while (1) {
u8 hash[UBIFS_HASH_ARR_SZ];

- cond_resched();
-
znode = cnext;
idx = c->cbuf + used;

@@ -235,7 +235,6 @@ long ubifs_destroy_tnc_subtree(const struct ubifs_info *c,
!ubifs_zn_dirty(zn->zbranch[n].znode))
clean_freed += 1;

- cond_resched();
kfree(zn->zbranch[n].znode);
}

@@ -914,7 +914,6 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
mmap_write_lock(mm);
prev = NULL;
for_each_vma(vmi, vma) {
- cond_resched();
BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
!!(vma->vm_flags & __VM_UFFD_FLAGS));
if (vma->vm_userfaultfd_ctx.ctx != ctx) {
@@ -1277,7 +1276,6 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
seq = read_seqcount_begin(&ctx->refile_seq);
need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
waitqueue_active(&ctx->fault_wqh);
- cond_resched();
} while (read_seqcount_retry(&ctx->refile_seq, seq));
if (need_wakeup)
__wake_userfault(ctx, range);
@@ -1392,8 +1390,6 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
basic_ioctls = false;
cur = vma;
do {
- cond_resched();
-
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
!!(cur->vm_flags & __VM_UFFD_FLAGS));

@@ -1458,7 +1454,6 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,

ret = 0;
for_each_vma_range(vmi, vma, end) {
- cond_resched();

BUG_ON(!vma_can_userfault(vma, vm_flags));
BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
@@ -1603,8 +1598,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
found = false;
cur = vma;
do {
- cond_resched();
-
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
!!(cur->vm_flags & __VM_UFFD_FLAGS));

@@ -1629,8 +1622,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,

ret = 0;
for_each_vma_range(vmi, vma, end) {
- cond_resched();
-
BUG_ON(!vma_can_userfault(vma, vma->vm_flags));

/*
@@ -152,7 +152,6 @@ static int build_merkle_tree(struct file *filp,
err = -EINTR;
goto out;
}
- cond_resched();
}
/* Finish all nonempty pending tree blocks. */
for (level = 0; level < num_levels; level++) {
@@ -71,7 +71,6 @@ static int fsverity_read_merkle_tree(struct inode *inode,
err = -EINTR;
break;
}
- cond_resched();
offs_in_page = 0;
}
return retval ? retval : err;
@@ -16,13 +16,6 @@ xchk_should_terminate(
struct xfs_scrub *sc,
int *error)
{
- /*
- * If preemption is disabled, we need to yield to the scheduler every
- * few seconds so that we don't run afoul of the soft lockup watchdog
- * or RCU stall detector.
- */
- cond_resched();
-
if (fatal_signal_pending(current)) {
if (*error == 0)
*error = -EINTR;
@@ -498,13 +498,6 @@ xfarray_sort_terminated(
struct xfarray_sortinfo *si,
int *error)
{
- /*
- * If preemption is disabled, we need to yield to the scheduler every
- * few seconds so that we don't run afoul of the soft lockup watchdog
- * or RCU stall detector.
- */
- cond_resched();
-
if ((si->flags & XFARRAY_SORT_KILLABLE) &&
fatal_signal_pending(current)) {
if (*error == 0)
@@ -171,7 +171,6 @@ xfs_end_io(
list_del_init(&ioend->io_list);
iomap_ioend_try_merge(ioend, &tmp);
xfs_end_ioend(ioend);
- cond_resched();
}
}

@@ -1716,8 +1716,6 @@ xfs_icwalk_ag(
if (error == -EFSCORRUPTED)
break;

- cond_resched();
-
if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
if (icw->icw_scan_limit <= 0)
@@ -420,7 +420,6 @@ xfs_iwalk_ag(
struct xfs_inobt_rec_incore *irec;
xfs_ino_t rec_fsino;

- cond_resched();
if (xfs_pwork_want_abort(&iwag->pwork))
goto out;