@@ -182,7 +182,6 @@ static int __init dummy_init_module(void)
for (i = 0; i < numdummies && !err; i++) {
err = dummy_init_one();
- cond_resched();
}
if (err < 0)
__rtnl_link_unregister(&dummy_link_ops);
@@ -12040,7 +12040,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
ret = -EINTR;
goto eeprom_done;
}
- cond_resched();
+ cond_resched_stall();
}
}
eeprom->len += i;
@@ -3937,7 +3937,6 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
*/
data[i] = e1000_shift_in_ee_bits(hw, 16);
e1000_standby_eeprom(hw);
- cond_resched();
}
}
@@ -4088,7 +4087,6 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
return -E1000_ERR_EEPROM;
e1000_standby_eeprom(hw);
- cond_resched();
/* Send the WRITE ENABLE command (8 bit opcode ) */
e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
@@ -4198,7 +4196,6 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
/* Recover from write */
e1000_standby_eeprom(hw);
- cond_resched();
words_written++;
}
@@ -309,7 +309,7 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
return 0;
if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
break;
- cond_resched();
+ cond_resched_stall();
}
dev_err(eth->dev, "mdio: MDIO timeout\n");
@@ -148,7 +148,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
mlx4_warn(dev, "VF Reset succeed\n");
return 0;
}
- cond_resched();
+ cond_resched_stall();
}
mlx4_err(dev, "Fail to send reset over the communication channel\n");
return -ETIMEDOUT;
@@ -312,7 +312,8 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
end = msecs_to_jiffies(timeout) + jiffies;
while (comm_pending(dev) && time_before(jiffies, end))
- cond_resched();
+ cond_resched_stall();
+
ret_from_pending = comm_pending(dev);
if (ret_from_pending) {
/* check if the slave is trying to boot in the middle of
@@ -387,7 +388,7 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
end = msecs_to_jiffies(timeout) + jiffies;
while (comm_pending(dev) && time_before(jiffies, end))
- cond_resched();
+ cond_resched_stall();
}
goto out;
@@ -470,7 +471,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
goto out;
}
- cond_resched();
+ cond_resched_stall();
}
/*
@@ -621,8 +622,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
err = mlx4_internal_err_ret_value(dev, op, op_modifier);
goto out;
}
-
- cond_resched();
+ cond_resched_stall();
}
if (cmd_pending(dev)) {
@@ -2324,8 +2324,7 @@ static int sync_toggles(struct mlx4_dev *dev)
priv->cmd.comm_toggle = rd_toggle >> 31;
return 0;
}
-
- cond_resched();
+ cond_resched_stall();
}
/*
@@ -4649,7 +4649,14 @@ static int move_all_busy(struct mlx4_dev *dev, int slave,
if (time_after(jiffies, begin + 5 * HZ))
break;
if (busy)
- cond_resched();
+ /*
+ * Giving up the spinlock in _move_all_busy() will
+ * reschedule if needed.
+ * Add a cpu_relax() here to ensure that we give
+ * others a chance to acquire the lock.
+ */
+ cpu_relax();
+
} while (busy);
if (busy)
@@ -285,7 +285,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
ent->ret = 0;
return;
}
- cond_resched();
+ cond_resched_stall();
} while (time_before(jiffies, poll_end));
ent->ret = -ETIMEDOUT;
@@ -1773,13 +1773,11 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
for (i = 0; i < cmd->vars.max_reg_cmds; i++) {
while (down_trylock(&cmd->vars.sem)) {
mlx5_cmd_trigger_completions(dev);
- cond_resched();
}
}
while (down_trylock(&cmd->vars.pages_sem)) {
mlx5_cmd_trigger_completions(dev);
- cond_resched();
}
/* Unlock cmdif */
@@ -373,8 +373,7 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
do {
if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
break;
-
- cond_resched();
+ cond_resched_stall();
} while (!time_after(jiffies, end));
if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
@@ -180,7 +180,6 @@ static int mlxsw_i2c_wait_go_bit(struct i2c_client *client,
break;
}
}
- cond_resched();
} while ((time_before(jiffies, end)) || (i++ < MLXSW_I2C_RETRY));
if (wait_done) {
@@ -361,8 +360,6 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
err = i2c_transfer(client->adapter, &write_tran, 1);
if (err == 1)
break;
-
- cond_resched();
} while ((time_before(jiffies, end)) ||
(j++ < MLXSW_I2C_RETRY));
@@ -473,8 +470,6 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
ARRAY_SIZE(read_tran));
if (err == ARRAY_SIZE(read_tran))
break;
-
- cond_resched();
} while ((time_before(jiffies, end)) ||
(j++ < MLXSW_I2C_RETRY));
@@ -1455,7 +1455,6 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
return 0;
- cond_resched();
} while (time_before(jiffies, end));
*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
@@ -1824,7 +1823,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
break;
}
- cond_resched();
} while (time_before(jiffies, end));
} else {
wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
@@ -1225,7 +1225,6 @@ static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)
sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
break;
- cond_resched();
}
if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
@@ -1246,7 +1245,6 @@ static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)
sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
break;
- cond_resched();
}
if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
@@ -1265,7 +1263,6 @@ static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)
sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
break;
- cond_resched();
}
if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
@@ -326,8 +326,6 @@ static int netxen_wait_rom_done(struct netxen_adapter *adapter)
long timeout = 0;
long done = 0;
- cond_resched();
-
while (done == 0) {
done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
done &= 2;
@@ -2023,7 +2023,6 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
break;
}
entry += p_hdr->size;
- cond_resched();
}
p_dev->ahw->reset.seq_index = index;
}
@@ -295,7 +295,6 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
long done = 0;
int err = 0;
- cond_resched();
while (done == 0) {
done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err);
done &= 2;
@@ -702,7 +702,6 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
addr += 16;
reg_read -= 16;
ret += 16;
- cond_resched();
}
out:
mutex_unlock(&adapter->ahw->mem_lock);
@@ -1383,7 +1382,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
- cond_resched();
}
fw_dump->clr = 1;
@@ -630,8 +630,6 @@ falcon_spi_read(struct ef4_nic *efx, const struct falcon_spi_device *spi,
break;
pos += block_len;
- /* Avoid locking up the system */
- cond_resched();
if (signal_pending(current)) {
rc = -EINTR;
break;
@@ -723,8 +721,6 @@ falcon_spi_write(struct ef4_nic *efx, const struct falcon_spi_device *spi,
pos += block_len;
- /* Avoid locking up the system */
- cond_resched();
if (signal_pending(current)) {
rc = -EINTR;
break;
@@ -839,8 +835,6 @@ falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
if (memcmp(empty, buffer, block_len))
return -EIO;
- /* Avoid locking up the system */
- cond_resched();
if (signal_pending(current))
return -EINTR;
}
@@ -434,7 +434,6 @@ static int __init ifb_init_module(void)
for (i = 0; i < numifbs && !err; i++) {
err = ifb_init_one(i);
- cond_resched();
}
if (err)
__rtnl_link_unregister(&ifb_link_ops);
@@ -292,7 +292,6 @@ void ipvlan_process_multicast(struct work_struct *work)
kfree_skb(skb);
}
dev_put(dev);
- cond_resched();
}
}
@@ -341,8 +341,6 @@ static void macvlan_process_broadcast(struct work_struct *w)
if (src)
dev_put(src->dev);
consume_skb(skb);
-
- cond_resched();
}
}
@@ -291,9 +291,9 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
}
/* Do not hog the CPU if rx buffers are consumed faster than
- * queued (unlikely).
+ * queued (unlikely).
*/
- cond_resched();
+ cond_resched_stall();
}
/* If we're still starved of rx buffers, reschedule later */
@@ -1492,7 +1492,6 @@ static void nsim_fib_event_work(struct work_struct *work)
nsim_fib_event(fib_event);
list_del(&fib_event->list);
kfree(fib_event);
- cond_resched();
}
mutex_unlock(&data->fib_lock);
}
@@ -4015,7 +4015,6 @@ static void free_unused_bufs(struct virtnet_info *vi)
struct virtqueue *vq = vi->sq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
virtnet_sq_free_unused_buf(vq, buf);
- cond_resched();
}
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -4023,7 +4022,6 @@ static void free_unused_bufs(struct virtnet_info *vi)
while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
virtnet_rq_free_unused_buf(rq->vq, buf);
- cond_resched();
}
}
@@ -74,8 +74,6 @@ static void wg_ratelimiter_gc_entries(struct work_struct *work)
}
#endif
spin_unlock(&table_lock);
- if (likely(work))
- cond_resched();
}
if (likely(work))
queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
@@ -213,7 +213,6 @@ void wg_packet_handshake_receive_worker(struct work_struct *work)
wg_receive_handshake_packet(wg, skb);
dev_kfree_skb(skb);
atomic_dec(&wg->handshake_queue_len);
- cond_resched();
}
}
@@ -501,8 +500,6 @@ void wg_packet_decrypt_worker(struct work_struct *work)
likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
wg_queue_enqueue_per_peer_rx(skb, state);
- if (need_resched())
- cond_resched();
}
}
@@ -279,8 +279,6 @@ void wg_packet_tx_worker(struct work_struct *work)
wg_noise_keypair_put(keypair, false);
wg_peer_put(peer);
- if (need_resched())
- cond_resched();
}
}
@@ -303,8 +301,6 @@ void wg_packet_encrypt_worker(struct work_struct *work)
}
}
wg_queue_enqueue_per_peer_tx(first, state);
- if (need_resched())
- cond_resched();
}
}
@@ -112,10 +112,10 @@ static u16 lo_measure_feedthrough(struct b43_wldev *dev,
udelay(21);
feedthrough = b43_phy_read(dev, B43_PHY_LO_LEAKAGE);
- /* This is a good place to check if we need to relax a bit,
+ /* This is a good place to check if we need to relax a bit,
* as this is the main function called regularly
- * in the LO calibration. */
- cond_resched();
+ * in the LO calibration. */
+ cond_resched_stall();
return feedthrough;
}
@@ -768,7 +768,6 @@ void b43_pio_rx(struct b43_pio_rxqueue *q)
stop = !pio_rx_frame(q);
if (stop)
break;
- cond_resched();
if (WARN_ON_ONCE(++count > 10000))
break;
}
@@ -1113,7 +1113,6 @@ static u16 b43legacy_phy_lo_b_r15_loop(struct b43legacy_wldev *dev)
ret += b43legacy_phy_read(dev, 0x002C);
}
local_irq_restore(flags);
- cond_resched();
return ret;
}
@@ -1242,7 +1241,6 @@ u16 b43legacy_phy_lo_g_deviation_subval(struct b43legacy_wldev *dev,
}
ret = b43legacy_phy_read(dev, 0x002D);
local_irq_restore(flags);
- cond_resched();
return ret;
}
@@ -1580,7 +1578,6 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev)
b43legacy_radio_write16(dev, 0x43, i);
b43legacy_radio_write16(dev, 0x52, phy->txctl2);
udelay(10);
- cond_resched();
b43legacy_phy_set_baseband_attenuation(dev, j * 2);
@@ -1631,7 +1628,6 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev)
phy->txctl2
| (3/*txctl1*/ << 4));
udelay(10);
- cond_resched();
b43legacy_phy_set_baseband_attenuation(dev, j * 2);
@@ -1654,7 +1650,6 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev)
b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA2);
udelay(2);
b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA3);
- cond_resched();
} else
b43legacy_phy_write(dev, 0x0015, r27 | 0xEFA0);
b43legacy_phy_lo_adjust(dev, is_initializing);
@@ -3979,7 +3979,6 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
static __always_inline void brcmf_delay(u32 ms)
{
if (ms < 1000 / HZ) {
- cond_resched();
mdelay(ms);
} else {
msleep(ms);
@@ -3988,8 +3988,6 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp,
if ((IN4500(ai, COMMAND)) == pCmd->cmd)
// PC4500 didn't notice command, try again
OUT4500(ai, COMMAND, pCmd->cmd);
- if (may_sleep && (max_tries & 255) == 0)
- cond_resched();
}
if (max_tries == -1) {
@@ -2309,8 +2309,6 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
}
iwl_trans_release_nic_access(trans);
- if (resched)
- cond_resched();
} else {
return -EBUSY;
}
@@ -632,7 +632,6 @@ mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length)
break;
}
}
- cond_resched();
udelay(1);
} while (--loops);
@@ -795,7 +794,6 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
break;
}
- cond_resched();
udelay(1);
} while (--loops);
@@ -130,7 +130,6 @@ int __mt76_worker_fn(void *ptr)
set_bit(MT76_WORKER_RUNNING, &w->state);
set_current_state(TASK_RUNNING);
w->fn(w);
- cond_resched();
clear_bit(MT76_WORKER_RUNNING, &w->state);
}
@@ -400,7 +400,7 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
/* Do not hog the CPU if rx buffers are consumed faster than
* queued (unlikely).
*/
- cond_resched();
+ cond_resched_stall();
}
/* If we're still starved of rx buffers, reschedule later */
@@ -423,7 +423,6 @@ static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
drb_send_cnt = t7xx_txq_burst_send_skb(txq);
if (drb_send_cnt <= 0) {
usleep_range(10, 20);
- cond_resched();
continue;
}
@@ -437,8 +436,6 @@ static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);
-
- cond_resched();
} while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() &&
(dpmaif_ctrl->state == DPMAIF_STATE_PWRON));
}
@@ -1571,7 +1571,6 @@ int xenvif_dealloc_kthread(void *data)
break;
xenvif_tx_dealloc_action(queue);
- cond_resched();
}
/* Unmap anything remaining*/
@@ -669,8 +669,6 @@ int xenvif_kthread_guest_rx(void *data)
* slots.
*/
xenvif_rx_queue_drop_expired(queue);
-
- cond_resched();
}
/* Bin any remaining skbs */