@@ -536,12 +536,12 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
if (!pmu->version)
return;
- pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
+ pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
kvm_pmu_cap.num_counters_gp);
- eax.split.bit_width = min_t(int, eax.split.bit_width,
+ eax.split.bit_width = min_t(int, eax.split.bit_width,
kvm_pmu_cap.bit_width_gp);
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
- eax.split.mask_length = min_t(int, eax.split.mask_length,
+ eax.split.mask_length = min_t(int, eax.split.mask_length,
kvm_pmu_cap.events_mask_len);
pmu->available_event_types = ~entry->ebx &
((1ull << eax.split.mask_length) - 1);
@@ -553,7 +553,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
min3(ARRAY_SIZE(fixed_pmc_events),
(size_t) edx.split.num_counters_fixed,
(size_t)kvm_pmu_cap.num_counters_fixed);
- edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
+ edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
kvm_pmu_cap.bit_width_fixed);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << edx.split.bit_width_fixed) - 1;
@@ -1306,7 +1306,7 @@ static long populate_pmd(struct cpa_data *cpa,
unsigned long start, unsigned long end,
unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
- long cur_pages = 0;
+ unsigned long cur_pages = 0;
pmd_t *pmd;
pgprot_t pmd_pgprot;
@@ -1234,7 +1234,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
for (left = size, i = 0; left > 0; left -= len, i++) {
struct page *page = pages[i];
- len = min_t(size_t, PAGE_SIZE - offset, left);
+ len = min_unsigned(PAGE_SIZE - offset, left);
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
ret = bio_iov_add_zone_append_page(bio, page, len,
offset);
@@ -141,7 +141,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (val < 0)
return -EINVAL;
bd->reserved_size =
- min_t(unsigned int, val, queue_max_bytes(q));
+ min_unsigned(val, queue_max_bytes(q));
return 0;
case SG_EMULATED_HOST:
return put_user(1, intp);
@@ -215,7 +215,7 @@ static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors)
sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n",
- sz, min_t(uint32_t,
+ sz, min_t(uint32_t,
total_sectors - 1, 0xFFFFFFFF));
}
done:
@@ -200,7 +200,7 @@ static int property_entry_read_string_array(const struct property_entry *props,
if (!strings)
return array_len;
- array_len = min_t(size_t, nval, array_len);
+ array_len = min_unsigned(nval, array_len);
length = array_len * sizeof(*strings);
pointer = property_entry_find(props, propname, length);
@@ -134,7 +134,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
if (priv->response_length) {
priv->response_read = true;
- ret_size = min_t(ssize_t, size, priv->response_length);
+ ret_size = min_t(ssize_t, size, priv->response_length);
if (ret_size <= 0) {
priv->response_length = 0;
goto out;
@@ -271,7 +271,7 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
dev_err(&chip->dev, "Unable to read burstcount\n");
return burstcnt;
}
- burstcnt = min_t(int, burstcnt, count - size);
+ burstcnt = min_unsigned(burstcnt, count - size);
rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality),
burstcnt, buf + size);
@@ -371,7 +371,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
rc = burstcnt;
goto out_err;
}
- burstcnt = min_t(int, burstcnt, len - count - 1);
+ burstcnt = min_unsigned(burstcnt, len - count - 1);
rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality),
burstcnt, buf + count);
if (rc < 0)
@@ -480,7 +480,7 @@ int divider_get_val(unsigned long rate, unsigned long parent_rate,
value = _get_val(table, div, flags, width);
- return min_t(unsigned int, value, clk_div_mask(width));
+ return min_unsigned(value, clk_div_mask(width));
}
EXPORT_SYMBOL_GPL(divider_get_val);
@@ -120,7 +120,7 @@ ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
return NULL;
i = 0;
do {
- size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);
+ size_t copy = min_unsigned(len, 1 << ioat_chan->xfercap_log);
desc = ioat_get_ring_ent(ioat_chan, idx + i);
hw = desc->hw;
@@ -190,8 +190,7 @@ __ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
i = 0;
do {
struct ioat_raw_descriptor *descs[2];
- size_t xfer_size = min_t(size_t,
- len, 1 << ioat_chan->xfercap_log);
+ size_t xfer_size = min_unsigned(len, 1 << ioat_chan->xfercap_log);
int s;
desc = ioat_get_ring_ent(ioat_chan, idx + i);
@@ -386,7 +385,7 @@ __ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
i = 0;
do {
struct ioat_raw_descriptor *descs[2];
- size_t xfer_size = min_t(size_t, len,
+ size_t xfer_size = min_unsigned(len,
1 << ioat_chan->xfercap_log);
desc = ioat_get_ring_ent(ioat_chan, idx + i);
@@ -494,7 +493,7 @@ __ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
do {
struct ioat_raw_descriptor *descs[4];
- size_t xfer_size = min_t(size_t, len,
+ size_t xfer_size = min_unsigned(len,
1 << ioat_chan->xfercap_log);
desc = ioat_get_ring_ent(ioat_chan, idx + i);
@@ -1725,9 +1725,9 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
if (lasth)
- sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width);
+ sizes.fb_width = min_unsigned(desired_mode->hdisplay + x, sizes.fb_width);
if (lastv)
- sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height);
+ sizes.fb_height = min_unsigned(desired_mode->vdisplay + y, sizes.fb_height);
}
mutex_unlock(&client->modeset_mutex);
@@ -79,7 +79,7 @@ module_param_cb(debug, &param_ops_dyndbg_classes, &drm_debug_bitmap, 0600);
void __drm_puts_coredump(struct drm_printer *p, const char *str)
{
struct drm_print_iterator *iterator = p->arg;
- ssize_t len;
+ size_t len;
if (!iterator->remain)
return;
@@ -108,7 +108,7 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str)
} else {
ssize_t pos = iterator->offset - iterator->start;
- len = min_t(ssize_t, strlen(str), iterator->remain);
+ len = min_unsigned(strlen(str), iterator->remain);
memcpy(iterator->data + pos, str, len);
@@ -190,7 +190,7 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
return MAX_JIFFY_OFFSET;
- return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+ return min_unsigned(MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
static unsigned long to_wait_timeout(s64 timeout_ns)
@@ -352,7 +352,7 @@ static int emit_pte(struct i915_request *rq,
const u64 encode = rq->context->vm->pte_encode(0, cache_level,
is_lmem ? PTE_LM : 0);
struct intel_ring *ring = rq->ring;
- int pkt, dword_length;
+ unsigned int pkt, dword_length;
u32 total = 0;
u32 page_size;
u32 *hdr, *cs;
@@ -397,7 +397,7 @@ static int emit_pte(struct i915_request *rq,
do {
if (cs - hdr >= pkt) {
- int dword_rem;
+ unsigned int dword_rem;
*hdr += cs - hdr - 2;
*cs++ = MI_NOOP;
@@ -74,7 +74,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
return ret;
count = round_down(count, sizeof(u32));
- count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+ count = min_unsigned(GEN7_L3LOG_SIZE - offset, count);
memset(buf, 0, count);
spin_lock(&i915->gem.contexts.lock);
@@ -226,7 +226,7 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
unsigned long j = msecs_to_jiffies(m);
- return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+ return min_unsigned(MAX_JIFFY_OFFSET, j + 1);
}
/*
@@ -304,7 +304,7 @@ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
if (dev->num_comp_vectors > 1)
comp_vector =
atomic_inc_return(&counter) %
- min_t(int, dev->num_comp_vectors, num_online_cpus());
+ min_unsigned(dev->num_comp_vectors, num_online_cpus());
return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
caller);
@@ -382,9 +382,9 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
* a reasonable batch size so that we can share CQs between
* multiple users instead of allocating a larger number of CQs.
*/
- nr_cqes = min_t(unsigned int, dev->attrs.max_cqe,
+ nr_cqes = min_unsigned(dev->attrs.max_cqe,
max(nr_cqes, IB_MAX_SHARED_CQ_SZ));
- nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
+ nr_cqs = min_unsigned(dev->num_comp_vectors, num_online_cpus());
for (i = 0; i < nr_cqs; i++) {
cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx);
if (IS_ERR(cq)) {
@@ -440,7 +440,7 @@ struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
}
num_comp_vectors =
- min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
+ min_unsigned(dev->num_comp_vectors, num_online_cpus());
/* Project the affinity to the device completion vector range */
if (comp_vector_hint < 0) {
comp_vector_hint =
@@ -681,7 +681,7 @@ void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
* even support all we need, and we'll have to live with what we get..
*/
attr->cap.max_send_wr =
- min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
+ min_unsigned(attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
@@ -1359,7 +1359,7 @@ struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
* max_send_sge <= max_sge_rd.
*/
qp->max_write_sge = qp_init_attr->cap.max_send_sge;
- qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
+ qp->max_read_sge = min_unsigned(qp_init_attr->cap.max_send_sge,
device->attrs.max_sge_rd);
if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
qp->integrity_en = true;
@@ -109,7 +109,7 @@ int intel_pasid_alloc_table(struct device *dev)
return -ENOMEM;
if (info->pasid_supported)
- max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
+ max_pasid = min_unsigned(pci_max_pasids(to_pci_dev(dev)),
intel_pasid_max_id);
size = max_pasid >> (PASID_PDE_SHIFT - 3);
@@ -6277,8 +6277,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
* A negative number is overly pessimistic, and causes
* obvious problems for unsigned storage. So clip to 0.
*/
- readpos -= min_t(sector_t, reshape_sectors, readpos);
- safepos -= min_t(sector_t, reshape_sectors, safepos);
+ readpos -= min_unsigned(reshape_sectors, readpos);
+ safepos -= min_unsigned(reshape_sectors, safepos);
}
/* Having calculated the 'writepos' possibly use it
@@ -3594,15 +3594,14 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
* consideration as to how many l2 queues / cnqs we have.
*/
feat_num[QED_RDMA_CNQ] =
- min_t(u32, sb_cnt.cnt / 2,
+ min_unsigned(sb_cnt.cnt / 2,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
non_l2_sbs = feat_num[QED_RDMA_CNQ];
}
if (QED_IS_L2_PERSONALITY(p_hwfn)) {
/* Start by allocating VF queues, then PF's */
- feat_num[QED_VF_L2_QUE] = min_t(u32,
- RESC_NUM(p_hwfn, QED_L2_QUEUE),
+ feat_num[QED_VF_L2_QUE] = min_unsigned(RESC_NUM(p_hwfn, QED_L2_QUEUE),
sb_cnt.iov_cnt);
feat_num[QED_PF_L2_QUE] = min_t(u32,
sb_cnt.cnt - non_l2_sbs,
@@ -3613,17 +3612,17 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
}
if (QED_IS_FCOE_PERSONALITY(p_hwfn))
- feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt,
+ feat_num[QED_FCOE_CQ] = min_unsigned(sb_cnt.cnt,
RESC_NUM(p_hwfn,
QED_CMDQS_CQS));
if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
- feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
+ feat_num[QED_ISCSI_CQ] = min_unsigned(sb_cnt.cnt,
RESC_NUM(p_hwfn,
QED_CMDQS_CQS));
if (QED_IS_NVMETCP_PERSONALITY(p_hwfn))
- feat_num[QED_NVMETCP_CQ] = min_t(u32, sb_cnt.cnt,
+ feat_num[QED_NVMETCP_CQ] = min_unsigned(sb_cnt.cnt,
RESC_NUM(p_hwfn,
QED_CMDQS_CQS));
@@ -4788,7 +4788,7 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
struct qed_ptt *ptt;
int rate;
- rate = min_t(int, vf_info->tx_rate, link.speed);
+ rate = min_unsigned(vf_info->tx_rate, link.speed);
ptt = qed_ptt_acquire(hwfn);
if (!ptt) {
@@ -201,7 +201,7 @@ static int sg_set_reserved_size(struct scsi_device *sdev, int __user *p)
if (size < 0)
return -EINVAL;
- sdev->sg_reserved_size = min_t(unsigned int, size,
+ sdev->sg_reserved_size = min_unsigned(size,
queue_max_bytes(sdev->request_queue));
return 0;
}
@@ -223,7 +223,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
int ret;
struct sbitmap sb_backup;
- depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
+ depth = min_unsigned(depth, scsi_device_max_queue_depth(sdev));
/*
* realloc if new shift is calculated, which is caused by setting
@@ -211,9 +211,9 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
*/
nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
- bufsize = min_t(size_t, bufsize,
+ bufsize = min_unsigned(bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
- bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
+ bufsize = min_unsigned(bufsize, queue_max_segments(q) << PAGE_SHIFT);
while (bufsize >= SECTOR_SIZE) {
buf = __vmalloc(bufsize,
@@ -880,7 +880,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
goto unlock;
}
- max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
+ max_append = min_unsigned(logical_to_sectors(sdkp->device, zone_blocks),
q->limits.max_segments << (PAGE_SHIFT - 9));
max_append = min_t(u32, max_append, queue_max_hw_sectors(q));
@@ -1041,7 +1041,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
- val = min_t(int, sfp->reserve.bufflen,
+ val = min_unsigned(sfp->reserve.bufflen,
max_sectors_bytes(sdp->device->request_queue));
return put_user(val, ip);
case SG_SET_COMMAND_Q:
@@ -1023,7 +1023,7 @@ static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
#else
const bool kmap_buf = false;
#endif
- int desc_len;
+ unsigned int desc_len;
int sgs;
struct page *vm_page;
struct scatterlist *sg;
@@ -1509,14 +1509,14 @@ n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
size_t n, head;
head = ldata->read_head & (N_TTY_BUF_SIZE - 1);
- n = min_t(size_t, count, N_TTY_BUF_SIZE - head);
+ n = min_unsigned(count, N_TTY_BUF_SIZE - head);
memcpy(read_buf_addr(ldata, head), cp, n);
ldata->read_head += n;
cp += n;
count -= n;
head = ldata->read_head & (N_TTY_BUF_SIZE - 1);
- n = min_t(size_t, count, N_TTY_BUF_SIZE - head);
+ n = min_unsigned(count, N_TTY_BUF_SIZE - head);
memcpy(read_buf_addr(ldata, head), cp, n);
ldata->read_head += n;
}
@@ -1595,7 +1595,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
{
struct n_tty_data *ldata = tty->disc_data;
bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty));
- size_t la_count = min_t(size_t, ldata->lookahead_count, count);
+ size_t la_count = min_unsigned(ldata->lookahead_count, count);
if (ldata->real_raw)
n_tty_receive_buf_real_raw(tty, cp, fp, count);
@@ -456,7 +456,7 @@ int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
if (ld->ops->receive_buf2)
count = ld->ops->receive_buf2(ld->tty, p, f, count);
else {
- count = min_t(int, count, ld->tty->receive_room);
+ count = min_unsigned(count, ld->tty->receive_room);
if (count && ld->ops->receive_buf)
ld->ops->receive_buf(ld->tty, p, f, count);
}
@@ -327,10 +327,10 @@ static int vc_selection(struct vc_data *vc, struct tiocl_selection *v,
return 0;
}
- v->xs = min_t(u16, v->xs - 1, vc->vc_cols - 1);
- v->ys = min_t(u16, v->ys - 1, vc->vc_rows - 1);
- v->xe = min_t(u16, v->xe - 1, vc->vc_cols - 1);
- v->ye = min_t(u16, v->ye - 1, vc->vc_rows - 1);
+ v->xs = min_unsigned(v->xs - 1, vc->vc_cols - 1);
+ v->ys = min_unsigned(v->ys - 1, vc->vc_rows - 1);
+ v->xe = min_unsigned(v->xe - 1, vc->vc_cols - 1);
+ v->ye = min_unsigned(v->ye - 1, vc->vc_rows - 1);
if (mouse_reporting() && (v->sel_mode & TIOCL_SELMOUSEREPORT)) {
mouse_report(tty, v->sel_mode & TIOCL_SELBUTTONMASK, v->xs,
@@ -634,7 +634,7 @@ qh_urb_transaction (
/* urb->transfer_buffer_length may be smaller than the
* size of the scatterlist (or vice versa)
*/
- this_sg_len = min_t(int, sg_dma_len(sg), len);
+ this_sg_len = min_unsigned(sg_dma_len(sg), len);
} else {
sg = NULL;
buf = urb->transfer_dma;
@@ -678,7 +678,7 @@ qh_urb_transaction (
break;
sg = sg_next(sg);
buf = sg_dma_address(sg);
- this_sg_len = min_t(int, sg_dma_len(sg), len);
+ this_sg_len = min_unsigned(sg_dma_len(sg), len);
}
qtd_prev = qtd;
@@ -830,7 +830,7 @@ qh_make (
1 << (urb->ep->desc.bInterval - 1));
/* Allow urb->interval to override */
- qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
+ qh->ps.bw_uperiod = min_unsigned(tmp, urb->interval);
qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
} else {
int think_time;
@@ -862,7 +862,7 @@ qh_make (
tmp = rounddown_pow_of_two(tmp);
/* Allow urb->interval to override */
- qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
+ qh->ps.bw_period = min_unsigned(tmp, urb->interval);
qh->ps.bw_uperiod = qh->ps.bw_period << 3;
}
}
@@ -1059,7 +1059,7 @@ iso_stream_init(
1 << (urb->ep->desc.bInterval - 1));
/* Allow urb->interval to override */
- stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
+ stream->ps.bw_uperiod = min_unsigned(tmp, urb->interval);
stream->uperiod = urb->interval;
stream->ps.period = urb->interval >> 3;
@@ -1102,7 +1102,7 @@ iso_stream_init(
1 << (urb->ep->desc.bInterval - 1));
/* Allow urb->interval to override */
- stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
+ stream->ps.bw_period = min_unsigned(tmp, urb->interval);
stream->ps.bw_uperiod = stream->ps.bw_period << 3;
stream->ps.period = urb->interval;
@@ -116,7 +116,7 @@ MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
static int number_of_tds(struct urb *urb)
{
- int len, i, num, this_sg_len;
+ unsigned int len, i, num, this_sg_len;
struct scatterlist *sg;
len = urb->transfer_buffer_length;
@@ -624,7 +624,7 @@ static void td_submit_urb (
* urb->transfer_buffer_length may be smaller than the
* size of the scatterlist (or vice versa)
*/
- this_sg_len = min_t(int, sg_dma_len(sg), data_len);
+ this_sg_len = min_unsigned(sg_dma_len(sg), data_len);
} else {
sg = NULL;
if (data_len)
@@ -672,7 +672,7 @@ static void td_submit_urb (
break;
sg = sg_next(sg);
data = sg_dma_address(sg);
- this_sg_len = min_t(int, sg_dma_len(sg),
+ this_sg_len = min_unsigned(sg_dma_len(sg),
data_len);
}
}
@@ -950,7 +950,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
/* urb->transfer_buffer_length may be smaller than the
* size of the scatterlist (or vice versa)
*/
- this_sg_len = min_t(int, sg_dma_len(sg), len);
+ this_sg_len = min_unsigned(sg_dma_len(sg), len);
} else {
sg = NULL;
data = urb->transfer_dma;
@@ -993,7 +993,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
break;
sg = sg_next(sg);
data = sg_dma_address(sg);
- this_sg_len = min_t(int, sg_dma_len(sg), len);
+ this_sg_len = min_unsigned(sg_dma_len(sg), len);
}
}
@@ -1244,7 +1244,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
pos %= AIO_EVENTS_PER_PAGE;
avail = min(avail, nr - ret);
- avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
+ avail = min_unsigned(avail, AIO_EVENTS_PER_PAGE - pos);
ev = kmap(page);
copy_ret = copy_to_user(event + ret, ev + pos,
@@ -1870,7 +1870,7 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
}
while (nr--) {
- u64 len = min_t(u64, stripe_len,
+ u64 len = min_unsigned(stripe_len,
cache->start + cache->length - logical[nr]);
cache->bytes_super += len;
@@ -5072,7 +5072,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
wc->reada_count = max(wc->reada_count, 2);
} else {
wc->reada_count = wc->reada_count * 3 / 2;
- wc->reada_count = min_t(int, wc->reada_count,
+ wc->reada_count = min_unsigned(wc->reada_count,
BTRFS_NODEPTRS_PER_BLOCK(fs_info));
}
@@ -1981,7 +1981,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
delalloc_start = delalloc_end + 1;
}
if (wbc->nr_to_write < delalloc_to_write) {
- int thresh = 8192;
+ unsigned int thresh = 8192;
if (delalloc_to_write < thresh * 2)
thresh = delalloc_to_write;
@@ -1136,7 +1136,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
if (tmp <= INT_MAX)
- extend_nr = min_t(int, extend_nr, tmp);
+ extend_nr = min_unsigned(extend_nr, tmp);
}
diff = (csum_offset + extend_nr) * csum_size;
@@ -1144,7 +1144,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
diff = diff - btrfs_item_size(leaf, path->slots[0]);
- diff = min_t(u32, btrfs_leaf_free_space(leaf), diff);
+ diff = min_unsigned(btrfs_leaf_free_space(leaf), diff);
diff /= csum_size;
diff *= csum_size;
@@ -3436,7 +3436,7 @@ static long btrfs_fallocate(struct file *file, int mode,
break;
}
last_byte = min(extent_map_end(em), alloc_end);
- actual_end = min_t(u64, extent_map_end(em), offset + len);
+ actual_end = min_unsigned(extent_map_end(em), offset + len);
last_byte = ALIGN(last_byte, blocksize);
if (em->block_start == EXTENT_MAP_HOLE ||
(cur_offset >= inode->i_size &&
@@ -3962,7 +3962,7 @@ static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
if (whence == SEEK_DATA && start >= i_size)
return -ENXIO;
- return min_t(loff_t, start, i_size);
+ return min_unsigned(start, i_size);
}
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
@@ -1212,7 +1212,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
* So here we skip inline extent creation completely.
*/
if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
- u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
+ u64 actual_end = min_unsigned(i_size_read(&inode->vfs_inode),
end + 1);
/* lets try to make an inline extent */
@@ -10171,7 +10171,7 @@ static ssize_t btrfs_encoded_read_inline(
ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
ptr = btrfs_file_extent_inline_start(item);
- encoded->len = min_t(u64, extent_start + ram_bytes,
+ encoded->len = min_unsigned(extent_start + ram_bytes,
inode->vfs_inode.i_size) - iocb->ki_pos;
ret = btrfs_encoded_io_compression_from_extent(fs_info,
btrfs_file_extent_compression(leaf, item));
@@ -10512,7 +10512,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
* We only want to return up to EOF even if the extent extends beyond
* that.
*/
- encoded->len = min_t(u64, extent_map_end(em),
+ encoded->len = min_unsigned(extent_map_end(em),
inode->vfs_inode.i_size) - iocb->ki_pos;
if (em->block_start == EXTENT_MAP_HOLE ||
test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3952,11 +3952,11 @@ static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
spin_lock(&root->qgroup_meta_rsv_lock);
if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
- num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
+ num_bytes = min_unsigned(root->qgroup_meta_rsv_prealloc,
num_bytes);
root->qgroup_meta_rsv_prealloc -= num_bytes;
} else {
- num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
+ num_bytes = min_unsigned(root->qgroup_meta_rsv_pertrans,
num_bytes);
root->qgroup_meta_rsv_pertrans -= num_bytes;
}
@@ -619,7 +619,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
loops = 0;
while ((delalloc_bytes || ordered_bytes) && loops < 3) {
u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
- long nr_pages = min_t(u64, temp, LONG_MAX);
+ long nr_pages = min_unsigned(temp, LONG_MAX);
int async_pages;
btrfs_start_delalloc_roots(fs_info, nr_pages, true);
@@ -6007,7 +6007,7 @@ struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
sub_stripes = map->sub_stripes;
factor = map->num_stripes / sub_stripes;
- *num_stripes = min_t(u64, map->num_stripes,
+ *num_stripes = min_unsigned(map->num_stripes,
sub_stripes * stripe_cnt);
stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
stripe_index *= sub_stripes;
@@ -626,7 +626,7 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
return -E2BIG;
while (len > 0) {
- unsigned int bytes_to_copy = min_t(unsigned int, len,
+ unsigned int bytes_to_copy = min_unsigned(len,
min_not_zero(offset_in_page(pos), PAGE_SIZE));
struct page *page;
@@ -581,7 +581,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
count++;
/* Fill in size of a hole we found */
map->m_pblk = 0;
- map->m_len = min_t(unsigned int, map->m_len, count);
+ map->m_len = min_unsigned(map->m_len, count);
goto cleanup;
}
@@ -1342,7 +1342,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
/* Fill the long name slots. */
for (i = 0; i < long_bhs; i++) {
- int copy = min_t(int, sb->s_blocksize - offset, size);
+ int copy = min_unsigned(sb->s_blocksize - offset, size);
memcpy(bhs[i]->b_data + offset, slots, copy);
mark_buffer_dirty_inode(bhs[i], dir);
offset = 0;
@@ -1353,7 +1353,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
err = fat_sync_bhs(bhs, long_bhs);
if (!err && i < nr_bhs) {
/* Fill the short name slot. */
- int copy = min_t(int, sb->s_blocksize - offset, size);
+ int copy = min_unsigned(sb->s_blocksize - offset, size);
memcpy(bhs[i]->b_data + offset, slots, copy);
mark_buffer_dirty_inode(bhs[i], dir);
if (IS_DIRSYNC(dir))
@@ -96,7 +96,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
unsigned block_bits = inode->i_blkbits;
unsigned block_size = (1 << block_bits);
size_t poff = offset_in_folio(folio, *pos);
- size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
+ size_t plen = min_unsigned(folio_size(folio) - poff, length);
unsigned first = poff >> block_bits;
unsigned last = (poff + plen - 1) >> block_bits;
@@ -850,7 +850,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
do {
unsigned long offset = offset_in_page(pos);
- unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
+ unsigned long bytes = min_unsigned(PAGE_SIZE - offset, length);
struct folio *folio;
status = iomap_write_begin(iter, pos, bytes, &folio);
@@ -906,7 +906,7 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
struct folio *folio;
int status;
size_t offset;
- size_t bytes = min_t(u64, SIZE_MAX, length);
+ size_t bytes = min_unsigned(SIZE_MAX, length);
status = iomap_write_begin(iter, pos, bytes, &folio);
if (status)
@@ -87,7 +87,7 @@ ssize_t kernel_read_file(struct file *file, loff_t offset, void **buf,
copied = 0;
while (copied < buf_size) {
ssize_t bytes;
- size_t wanted = min_t(size_t, buf_size - copied,
+ size_t wanted = min_unsigned(buf_size - copied,
i_size - pos);
bytes = kernel_read(file, *buf + copied, wanted, &pos);
@@ -1173,7 +1173,7 @@ static int iter_to_pipe(struct iov_iter *from,
n = DIV_ROUND_UP(left + start, PAGE_SIZE);
for (i = 0; i < n; i++) {
- int size = min_t(int, left, PAGE_SIZE - start);
+ int size = min_unsigned(left, PAGE_SIZE - start);
buf.page = pages[i];
buf.offset = start;
@@ -98,7 +98,7 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
for (i = 0; i < page_count; ++i) {
unsigned int len =
- min_t(unsigned int, PAGE_SIZE - offset, total_len);
+ min_unsigned(PAGE_SIZE - offset, total_len);
struct page *page = alloc_page(GFP_NOIO);
if (!page) {
@@ -303,7 +303,7 @@ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
while (offset < entry->length) {
void *buff = entry->data[offset / PAGE_SIZE]
+ (offset % PAGE_SIZE);
- int bytes = min_t(int, entry->length - offset,
+ int bytes = min_unsigned(entry->length - offset,
PAGE_SIZE - (offset % PAGE_SIZE));
if (bytes >= remaining) {
@@ -120,7 +120,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
len -= pos;
memmove(buf, buf + pos, len);
}
- return min_t(ssize_t, count, len);
+ return min_unsigned(count, len);
}
/* kernfs write callback for regular sysfs files */
@@ -147,7 +147,7 @@ static ssize_t sysfs_kf_bin_write(struct kernfs_open_file *of, char *buf,
if (size) {
if (size <= pos)
return -EFBIG;
- count = min_t(ssize_t, count, size - pos);
+ count = min_unsigned(count, size - pos);
}
if (!count)
return 0;
@@ -603,7 +603,7 @@ void nfs_readahead(struct readahead_control *);
static inline loff_t nfs_size_to_loff_t(__u64 size)
{
- return min_t(u64, size, OFFSET_MAX);
+ return min_t(u64, size, OFFSET_MAX);
}
static inline ino_t
@@ -432,7 +432,7 @@ static inline bool skb_frag_must_loop(struct page *p)
for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \
p_off = (f_off) & (PAGE_SIZE - 1), \
p_len = skb_frag_must_loop(p) ? \
- min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
+ min_unsigned(f_len, PAGE_SIZE - p_off) : f_len, \
copied = 0; \
copied < f_len; \
copied += p_len, p++, p_off = 0, \
@@ -1087,7 +1087,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
u32 id;
progs = &cgrp->bpf.progs[atype];
- cnt = min_t(int, prog_list_length(progs), total_cnt);
+ cnt = min_unsigned(prog_list_length(progs), total_cnt);
i = 0;
hlist_for_each_entry(pl, progs, node) {
prog = prog_list_prog(pl);
@@ -1479,7 +1479,7 @@ static void __update_reg32_bounds(struct bpf_reg_state *reg)
reg->s32_min_value = max_t(s32, reg->s32_min_value,
var32_off.value | (var32_off.mask & S32_MIN));
/* max signed is min(sign bit) | max(other bits) */
- reg->s32_max_value = min_t(s32, reg->s32_max_value,
+ reg->s32_max_value = min_t(s32, reg->s32_max_value,
var32_off.value | (var32_off.mask & S32_MAX));
reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
reg->u32_max_value = min(reg->u32_max_value,
@@ -1492,7 +1492,7 @@ static void __update_reg64_bounds(struct bpf_reg_state *reg)
reg->smin_value = max_t(s64, reg->smin_value,
reg->var_off.value | (reg->var_off.mask & S64_MIN));
/* max signed is min(sign bit) | max(other bits) */
- reg->smax_value = min_t(s64, reg->smax_value,
+ reg->smax_value = min_t(s64, reg->smax_value,
reg->var_off.value | (reg->var_off.mask & S64_MAX));
reg->umin_value = max(reg->umin_value, reg->var_off.value);
reg->umax_value = min(reg->umax_value,
@@ -1517,7 +1517,7 @@ static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
reg->s32_min_value = reg->u32_min_value =
max_t(u32, reg->s32_min_value, reg->u32_min_value);
reg->s32_max_value = reg->u32_max_value =
- min_t(u32, reg->s32_max_value, reg->u32_max_value);
+ min_unsigned(reg->s32_max_value, reg->u32_max_value);
return;
}
/* Learn sign from unsigned bounds. Signed bounds cross the sign
@@ -1529,7 +1529,7 @@ static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
*/
reg->s32_min_value = reg->u32_min_value;
reg->s32_max_value = reg->u32_max_value =
- min_t(u32, reg->s32_max_value, reg->u32_max_value);
+ min_unsigned(reg->s32_max_value, reg->u32_max_value);
} else if ((s32)reg->u32_min_value < 0) {
/* Negative. We can't learn anything from the smax, but smin
* is negative, hence safe.
@@ -1550,7 +1550,7 @@ static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
if (reg->smin_value >= 0 || reg->smax_value < 0) {
reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
reg->umin_value);
- reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
+ reg->smax_value = reg->umax_value = min_unsigned(reg->smax_value,
reg->umax_value);
return;
}
@@ -1562,7 +1562,7 @@ static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
* is positive, hence safe.
*/
reg->smin_value = reg->umin_value;
- reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
+ reg->smax_value = reg->umax_value = min_unsigned(reg->smax_value,
reg->umax_value);
} else if ((s64)reg->umin_value < 0) {
/* Negative. We can't learn anything from the smax, but smin
@@ -135,7 +135,7 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len)
strscpy(ks_namebuf, name, sizeof(ks_namebuf));
/* Work out the longest name that matches the prefix */
if (++number == 1) {
- prev_len = min_t(int, max_len-1,
+ prev_len = min_unsigned(max_len-1,
strlen(ks_namebuf));
memcpy(ks_namebuf_prev, ks_namebuf, prev_len);
ks_namebuf_prev[prev_len] = '\0';
@@ -7552,7 +7552,7 @@ void perf_prepare_sample(struct perf_event_header *header,
* Make sure this doesn't happen by using up to U16_MAX bytes
* per sample in total (rounded down to 8 byte boundary).
*/
- size = min_t(size_t, U16_MAX - header->size,
+ size = min_unsigned(U16_MAX - header->size,
event->attr.aux_sample_size);
size = rounddown(size, 8);
size = perf_prepare_sample_aux(event, data, size);
@@ -811,7 +811,7 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp)
struct address_space *mapping = uprobe->inode->i_mapping;
loff_t offs = uprobe->offset;
void *insn = &uprobe->arch.insn;
- int size = sizeof(uprobe->arch.insn);
+ unsigned int size = sizeof(uprobe->arch.insn);
int len, err = -EIO;
/* Copy only available bytes, -EIO if nothing was read */
@@ -825,7 +825,7 @@ static void __disable_runtime(struct rq *rq)
raw_spin_lock(&iter->rt_runtime_lock);
if (want > 0) {
- diff = min_t(s64, iter->rt_runtime, want);
+ diff = min_unsigned(iter->rt_runtime, want);
iter->rt_runtime -= diff;
want -= diff;
} else {
@@ -1243,7 +1243,7 @@ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
* If the next highres timer to expire is earlier than next_event, the
* idle governor needs to know that.
*/
- next_event = min_t(u64, next_event,
+ next_event = min_t(u64, next_event,
hrtimer_next_event_without(&ts->sched_timer));
return ktime_sub(next_event, now);
@@ -383,7 +383,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned nbytes,
p2 = miter.addr;
while (nbytes > n) {
- i = min_t(unsigned, nbytes - n, buf_len);
+ i = min_unsigned(nbytes - n, buf_len);
memset(p2, 0, i);
p2 += i;
nbytes -= i;
@@ -1173,7 +1173,7 @@ char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
}
if (spec.field_width > 0)
- len = min_t(int, spec.field_width, 64);
+ len = min_unsigned(spec.field_width, 64);
for (i = 0; i < len; ++i) {
if (buf < end)
@@ -2687,7 +2687,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
isize = i_size_read(inode);
if (unlikely(iocb->ki_pos >= isize))
goto put_folios;
- end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
+ end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
/*
* Once we start copying data, we don't want to be touching any
@@ -2707,7 +2707,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
struct folio *folio = fbatch.folios[i];
size_t fsize = folio_size(folio);
size_t offset = iocb->ki_pos & (fsize - 1);
- size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
+ size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
fsize - offset);
size_t copied;
@@ -259,7 +259,7 @@ static inline struct folio *gup_folio_range_next(struct page *start,
unsigned int nr = 1;
if (folio_test_large(folio))
- nr = min_t(unsigned int, npages - i,
+ nr = min_unsigned(npages - i,
folio_nr_pages(folio) - folio_page_idx(folio, next));
*ntails = nr;
@@ -7458,7 +7458,7 @@ void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
- long nr_swap_pages = get_nr_swap_pages();
+ unsigned long nr_swap_pages = get_nr_swap_pages();
if (mem_cgroup_disabled() || do_memsw_account())
return nr_swap_pages;
@@ -866,7 +866,7 @@ static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
/* update left and right contig manually */
s_block->left_free = min(s_block->left_free, s_off);
if (s_index == e_index)
- s_block->right_free = min_t(int, s_block->right_free,
+ s_block->right_free = min_unsigned(s_block->right_free,
PCPU_BITMAP_BLOCK_BITS - e_off);
else
s_block->right_free = 0;
@@ -900,7 +900,7 @@ static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
pcpu_block_refresh_hint(chunk, e_index);
} else {
e_block->right_free =
- min_t(int, e_block->right_free,
+ min_unsigned(e_block->right_free,
PCPU_BITMAP_BLOCK_BITS - e_off);
}
}
@@ -1224,7 +1224,7 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
/*
* Search to find a fit.
*/
- end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
+ end = min_unsigned(start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
pcpu_chunk_map_bits(chunk));
bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
align_mask, &area_off, &area_bits);
@@ -2042,7 +2042,8 @@ static void pcpu_balance_populated(void)
/* gfp flags passed to underlying allocators */
const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
struct pcpu_chunk *chunk;
- int slot, nr_to_pop, ret;
+ unsigned int slot, nr_to_pop;
+ int ret;
lockdep_assert_held(&pcpu_lock);
@@ -1016,7 +1016,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
* Don't let a single process grow so big a user can't recover
*/
if (mm) {
- long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
+ unsigned long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
allowed -= min_t(long, mm->total_vm / 32, reserve);
}
@@ -651,7 +651,7 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
refcount_add(truesize, &skb->sk->sk_wmem_alloc);
}
for (refs = 0; copied != 0; start = 0) {
- int size = min_t(int, copied, PAGE_SIZE - start);
+ int size = min_unsigned(copied, PAGE_SIZE - start);
struct page *head = compound_head(pages[n]);
start += (pages[n] - head) << PAGE_SHIFT;
@@ -4008,7 +4008,7 @@ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
for (i = sinfo->nr_frags - 1; i >= 0 && offset > 0; i--) {
skb_frag_t *frag = &sinfo->frags[i];
- int shrink = min_t(int, offset, skb_frag_size(frag));
+ int shrink = min_unsigned(offset, skb_frag_size(frag));
len_free += shrink;
offset -= shrink;
@@ -1560,7 +1560,7 @@ bool __skb_flow_dissect(const struct net *net,
ret = true;
out:
- key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
+ key_control->thoff = min_unsigned(nhoff, skb ? skb->len : hlen);
key_basic->n_proto = proto;
key_basic->ip_proto = ip_proto;
@@ -960,7 +960,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
16, 1, skb->head, headroom, false);
- seg_len = min_t(int, skb_headlen(skb), len);
+ seg_len = min_unsigned(skb_headlen(skb), len);
if (seg_len)
print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
16, 1, skb->data, seg_len, false);
@@ -979,7 +979,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
skb_frag_foreach_page(frag, skb_frag_off(frag),
skb_frag_size(frag), p, p_off, p_len,
copied) {
- seg_len = min_t(int, p_len, len);
+ seg_len = min_unsigned(p_len, len);
vaddr = kmap_atomic(p);
print_hex_dump(level, "skb frag: ",
DUMP_PREFIX_OFFSET,
@@ -2801,7 +2801,7 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
struct kvec kv;
struct msghdr msg;
- slen = min_t(int, len, skb_headlen(skb) - offset);
+ slen = min_unsigned(len, skb_headlen(skb) - offset);
kv.iov_base = skb->data + offset;
kv.iov_len = slen;
memset(&msg, 0, sizeof(msg));
@@ -2836,7 +2836,7 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
- slen = min_t(size_t, len, skb_frag_size(frag) - offset);
+ slen = min_unsigned(len, skb_frag_size(frag) - offset);
while (slen) {
ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked,
@@ -3316,7 +3316,7 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
return ret;
len -= hlen;
} else {
- plen = min_t(int, skb_headlen(from), len);
+ plen = min_unsigned(skb_headlen(from), len);
if (plen) {
page = virt_to_head_page(from->head);
offset = from->data - (unsigned char *)page_address(page);
@@ -3341,7 +3341,7 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
if (!len)
break;
skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
- size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
+ size = min_unsigned(skb_frag_size(&skb_shinfo(to)->frags[j]),
len);
skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
len -= size;
@@ -42,7 +42,7 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
}
orig_offset = pfrag->offset;
- use = min_t(int, len, pfrag->size - orig_offset);
+ use = min_unsigned(len, pfrag->size - orig_offset);
if (!sk_wmem_schedule(sk, use)) {
ret = -ENOMEM;
goto msg_trim;
@@ -335,7 +335,7 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
msg->sg.size += copied;
while (copied) {
- use = min_t(int, copied, PAGE_SIZE - offset);
+ use = min_unsigned(copied, PAGE_SIZE - offset);
sg_set_page(&msg->sg.data[msg->sg.end],
pages[i], use, offset);
sg_unmark_end(&msg->sg.data[msg->sg.end]);
@@ -1137,7 +1137,7 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
* play 'guess the biggest size' games. RCVBUF/SNDBUF
* are treated in BSD as hints
*/
- val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
+ val = min_unsigned(val, READ_ONCE(sysctl_wmem_max));
set_sndbuf:
/* Ensure val * 2 fits into an int, to prevent max_t()
* from treating it as a negative value.
@@ -1169,7 +1169,7 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
* play 'guess the biggest size' games. RCVBUF/SNDBUF
* are treated in BSD as hints
*/
- __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
+ __sock_set_rcvbuf(sk, min_unsigned(val, READ_ONCE(sysctl_rmem_max)));
break;
case SO_RCVBUFFORCE:
@@ -257,7 +257,7 @@ bool icmp_global_allow(void)
if (incr)
WRITE_ONCE(icmp_global.stamp, now);
}
- credit = min_t(u32, icmp_global.credit + incr,
+ credit = min_unsigned(icmp_global.credit + incr,
READ_ONCE(sysctl_icmp_msgs_burst));
if (credit) {
/* We want to use a credit of one in average, but need to randomize
@@ -1225,7 +1225,7 @@ static int __ip_append_data(struct sock *sk,
skb_shinfo(skb)->nr_frags = ++i;
get_page(pfrag->page);
}
- copy = min_t(int, copy, pfrag->size - pfrag->offset);
+ copy = min_unsigned(copy, pfrag->size - pfrag->offset);
if (getfrag(from,
page_address(pfrag->page) + pfrag->offset,
offset, copy, skb->len, skb) < 0)
@@ -1361,7 +1361,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
merge = false;
}
- copy = min_t(int, copy, pfrag->size - pfrag->offset);
+ copy = min_unsigned(copy, pfrag->size - pfrag->offset);
if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) {
if (tcp_downgrade_zcopy_pure(sk, skb))
@@ -2054,7 +2054,7 @@ static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc,
s32 copybuf_len,
struct scm_timestamping_internal *tss)
{
- u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint);
+ u32 offset, copylen = min_unsigned(copybuf_len, zc->recv_skip_hint);
if (!copylen)
return 0;
@@ -2230,7 +2230,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
return -EINVAL;
}
vma_len = min_t(unsigned long, zc->length, vma->vm_end - address);
- avail_len = min_t(u32, vma_len, inq);
+ avail_len = min_unsigned(vma_len, inq);
total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
if (total_bytes_to_map) {
if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
@@ -4135,7 +4135,7 @@ int do_tcp_getsockopt(struct sock *sk, int level,
if (ca_ops && ca_ops->get_info)
sz = ca_ops->get_info(sk, ~0U, &attr, &info);
- len = min_t(unsigned int, len, sz);
+ len = min_unsigned(len, sz);
if (copy_to_sockptr(optlen, &len, sizeof(int)))
return -EFAULT;
if (copy_to_sockptr(optval, &info, len))
@@ -4181,7 +4181,7 @@ int do_tcp_getsockopt(struct sock *sk, int level,
key_len = tcp_fastopen_get_cipher(net, icsk, key) *
TCP_FASTOPEN_KEY_LENGTH;
- len = min_t(unsigned int, len, key_len);
+ len = min_unsigned(len, key_len);
if (copy_to_sockptr(optlen, &len, sizeof(int)))
return -EFAULT;
if (copy_to_sockptr(optval, key, len))
@@ -498,7 +498,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk);
int room;
- room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
+ room = min_unsigned(tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
if (room <= 0)
return;
@@ -745,7 +745,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
rcvmem += 128;
do_div(rcvwin, tp->advmss);
- rcvbuf = min_t(u64, rcvwin * rcvmem,
+ rcvbuf = min_unsigned(rcvwin * rcvmem,
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
if (rcvbuf > sk->sk_rcvbuf) {
WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
@@ -1050,7 +1050,7 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
tp->sacked_out,
tp->undo_marker ? tp->undo_retrans : 0);
#endif
- tp->reordering = min_t(u32, (metric + mss - 1) / mss,
+ tp->reordering = min_unsigned((metric + mss - 1) / mss,
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
}
@@ -2029,7 +2029,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
if (!tcp_limit_reno_sacked(tp))
return;
- tp->reordering = min_t(u32, tp->packets_out + addend,
+ tp->reordering = min_unsigned(tp->packets_out + addend,
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
tp->reord_seen++;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
@@ -245,7 +245,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
0, TCP_MAX_WSCALE);
}
/* Set the clamp no higher than max representable value */
- (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
+ (*window_clamp) = min_unsigned(U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
@@ -1471,7 +1471,7 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de
/* Reno case is special. Sigh... */
if (tcp_is_reno(tp) && decr > 0)
- tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
+ tp->sacked_out -= min_unsigned(tp->sacked_out, decr);
if (tp->lost_skb_hint &&
before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
@@ -1629,7 +1629,7 @@ static int __pskb_trim_head(struct sk_buff *skb, int len)
struct skb_shared_info *shinfo;
int i, k, eat;
- eat = min_t(int, len, skb_headlen(skb));
+ eat = min_unsigned(len, skb_headlen(skb));
if (eat) {
__skb_pull(skb, eat);
len -= eat;
@@ -2417,7 +2417,7 @@ static int tcp_mtu_probe(struct sock *sk)
len = 0;
tcp_for_write_queue_from_safe(skb, next, sk) {
- copy = min_t(int, skb->len, probe_size - len);
+ copy = min_unsigned(skb->len, probe_size - len);
skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
if (skb->len <= copy) {
@@ -2508,7 +2508,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2 * skb->truesize,
sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
if (sk->sk_pacing_status == SK_PACING_NONE)
- limit = min_t(unsigned long, limit,
+ limit = min_unsigned(limit,
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
limit <<= factor;
@@ -2673,9 +2673,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
limit = mss_now;
if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
- min_t(unsigned int,
- cwnd_quota,
- max_segs),
+ min_unsigned(cwnd_quota, max_segs),
nonagle);
if (skb->len > limit &&
@@ -2964,7 +2962,7 @@ u32 __tcp_select_window(struct sock *sk)
if (sk_is_mptcp(sk))
mptcp_space(sk, &free_space, &allowed_space);
- full_space = min_t(int, tp->window_clamp, allowed_space);
+ full_space = min_unsigned(tp->window_clamp, allowed_space);
if (unlikely(mss > full_space)) {
mss = full_space;
@@ -3201,7 +3199,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
diff -= tcp_skb_pcount(skb);
if (diff)
tcp_adjust_pcount(sk, skb, diff);
- avail_wnd = min_t(int, avail_wnd, cur_mss);
+ avail_wnd = min_unsigned(avail_wnd, cur_mss);
if (skb->len < avail_wnd)
tcp_retrans_try_collapse(sk, skb, avail_wnd);
}
@@ -3324,7 +3322,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
/* In case tcp_shift_skb_data() have aggregated large skbs,
* we need to make sure not sending too bigs TSO packets
*/
- segs = min_t(int, segs, max_segs);
+ segs = min_unsigned(segs, max_segs);
if (tp->retrans_out >= tp->lost_out) {
break;
@@ -3749,7 +3747,8 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_fastopen_request *fo = tp->fastopen_req;
- int space, err = 0;
+ unsigned int space;
+ int err = 0;
struct sk_buff *syn_data;
tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
@@ -3923,7 +3922,7 @@ void tcp_send_delayed_ack(struct sock *sk)
ato = min(ato, max_ato);
}
- ato = min_t(u32, ato, inet_csk(sk)->icsk_delack_max);
+ ato = min_unsigned(ato, inet_csk(sk)->icsk_delack_max);
/* Stay within the limit we were given */
timeout = jiffies + ato;
@@ -1369,8 +1369,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
* idev->desync_factor if it's larger
*/
cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
- max_desync_factor = min_t(__u32,
- idev->cnf.max_desync_factor,
+ max_desync_factor = min_unsigned(idev->cnf.max_desync_factor,
cnf_temp_preferred_lft - regen_advance);
if (unlikely(idev->desync_factor > max_desync_factor)) {
@@ -1779,7 +1779,7 @@ static int __ip6_append_data(struct sock *sk,
skb_shinfo(skb)->nr_frags = ++i;
get_page(pfrag->page);
}
- copy = min_t(int, copy, pfrag->size - pfrag->offset);
+ copy = min_unsigned(copy, pfrag->size - pfrag->offset);
if (getfrag(from,
page_address(pfrag->page) + pfrag->offset,
offset, copy, skb->len, skb) < 0)
@@ -221,7 +221,7 @@ static int genl_allocate_reserve_groups(int n_groups, int *first_id)
fits = true;
for (i = id;
- i < min_t(int, id + n_groups,
+ i < min_unsigned(id + n_groups,
mc_groups_longs * BITS_PER_LONG);
i++) {
if (test_bit(i, mc_groups)) {
@@ -1407,7 +1407,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
po_skip = po;
}
- i = j = min_t(int, po->rollover->sock, num - 1);
+ i = j = min_unsigned(po->rollover->sock, num - 1);
do {
po_next = pkt_sk(rcu_dereference(f->arr[i]));
if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
@@ -3012,7 +3012,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
- linear = max(linear, min_t(int, len, dev->hard_header_len));
+ linear = max_unsigned(linear, min_unsigned(len, dev->hard_header_len));
skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
@@ -1403,7 +1403,7 @@ void sctp_assoc_update_frag_point(struct sctp_association *asoc)
sctp_datachk_len(&asoc->stream));
if (asoc->user_frag)
- frag = min_t(int, frag, asoc->user_frag);
+ frag = min_unsigned(frag, asoc->user_frag);
frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
sctp_datachk_len(&asoc->stream));
@@ -2190,7 +2190,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
/* allow fallback to order-0 allocations */
- size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
+ size = min_unsigned(size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
@@ -390,7 +390,7 @@ static int ima_calc_file_hash_atfm(struct file *file,
goto out3;
}
/* read buffer */
- rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
+ rbuf_len = min_unsigned(i_size - offset, rbuf_size[active]);
rc = integrity_kernel_read(file, offset, rbuf[active],
rbuf_len);
if (rc != rbuf_len) {
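
Note on the helpers used above: this series relies on min_unsigned()/max_unsigned(), which take no explicit type argument and compare both operands as unsigned values. The call sites that keep min_t() are exactly those where the explicit type still does real work: signed comparisons (the ssize_t/s32/s64/loff_t sites) or deliberate narrowing (the uint32_t GPT size check). The helpers themselves are not defined in this excerpt; recent mainline provides umin()/umax() for the same purpose. A minimal sketch of what such a macro could look like, assuming promotion-based unsigned comparison (the __unsigned_promote name below is illustrative, not taken from the series):

/*
 * Illustrative sketch only.  The "+ 0u + 0ul + 0ull" idiom promotes
 * the operand to an unsigned type at least as wide as unsigned long
 * long without changing non-negative values, so the comparison is
 * performed as unsigned regardless of the operands' declared types.
 * Unlike the real kernel macros, this sketch evaluates each argument
 * twice and does no compile-time signedness checking.
 */
#define __unsigned_promote(x)	((x) + 0u + 0ul + 0ull)

#define min_unsigned(a, b) \
	(__unsigned_promote(a) < __unsigned_promote(b) ? (a) : (b))

#define max_unsigned(a, b) \
	(__unsigned_promote(a) > __unsigned_promote(b) ? (a) : (b))

Under that reading, a hunk like the bsg one, bd->reserved_size = min_unsigned(val, queue_max_bytes(q)), keeps its old behaviour because val has already been checked for val < 0 at that point; sites where a negative operand is meaningful were left on min_t() instead.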