--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -93,7 +93,7 @@ static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
- mpol_cond_put(vma->vm_policy);
+ mpol_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
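--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c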
@@ -107,7 +107,6 @@ static void hold_task_mempolicy(struct proc_maps_private *priv)
task_lock(task);
priv->task_mempolicy = get_task_policy(task);
- mpol_get(priv->task_mempolicy);
task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
@@ -1949,7 +1948,7 @@ static int show_numa_map(struct seq_file *m, void *v)
pol = __get_vma_policy(vma, vma->vm_start);
if (pol) {
mpol_to_str(buffer, sizeof(buffer), pol);
- mpol_cond_put(pol);
+ mpol_put(pol);
} else {
mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
}
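--- a/kernel/fork.c
+++ b/kernel/fork.c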
@@ -712,7 +712,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
fail_nomem_mas_store:
unlink_anon_vmas(tmp);
fail_nomem_anon_vma_fork:
- mpol_put(vma_policy(tmp));
+ mpol_kill(vma_policy(tmp));
fail_nomem_policy:
vm_area_free(tmp);
fail_nomem:
@@ -2537,7 +2537,7 @@ static __latent_entropy struct task_struct *copy_process(
bad_fork_cleanup_policy:
lockdep_free_task(p);
#ifdef CONFIG_NUMA
- mpol_put(p->mempolicy);
+ mpol_kill(p->mempolicy);
#endif
bad_fork_cleanup_delayacct:
delayacct_tsk_free(p);
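--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c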
@@ -1246,7 +1246,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
h->resv_huge_pages--;
}
- mpol_cond_put(mpol);
+ mpol_put(mpol);
return page;
err:
@@ -2315,7 +2315,7 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
if (!page)
page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
- mpol_cond_put(mpol);
+ mpol_put(mpol);
return page;
}
@@ -2351,7 +2351,7 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
gfp_mask = htlb_alloc_mask(h);
node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
- mpol_cond_put(mpol);
+ mpol_put(mpol);
return page;
}
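--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c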
@@ -795,11 +795,11 @@ static int vma_replace_policy(struct vm_area_struct *vma,
old = vma->vm_policy;
vma->vm_policy = new; /* protected by mmap_lock */
- mpol_put(old);
+ mpol_kill(old);
return 0;
err_out:
- mpol_put(new);
+ mpol_kill(new);
return err;
}
@@ -890,7 +890,7 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
ret = mpol_set_nodemask(new, nodes, scratch);
if (ret) {
task_unlock(current);
- mpol_put(new);
+ mpol_kill(new);
goto out;
}
@@ -899,7 +899,7 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
if (new && new->mode == MPOL_INTERLEAVE)
current->il_prev = MAX_NUMNODES-1;
task_unlock(current);
- mpol_put(old);
+ mpol_kill(old);
ret = 0;
out:
NODEMASK_SCRATCH_FREE(scratch);
@@ -1370,7 +1369,7 @@ static long do_mbind(unsigned long start, unsigned long len,
mmap_write_unlock(mm);
mpol_out:
- mpol_put(new);
+ mpol_kill(new);
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
lru_cache_enable();
return err;
@@ -1566,7 +1565,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
new->home_node = home_node;
err = mbind_range(mm, vmstart, vmend, new);
- mpol_put(new);
+ mpol_kill(new);
if (err)
break;
}
@@ -1813,14 +1812,13 @@ static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
bool vma_policy_mof(struct vm_area_struct *vma)
{
struct mempolicy *pol;
+ bool ret = false;
if (vma->vm_ops && vma->vm_ops->get_policy) {
- bool ret = false;
-
pol = vma->vm_ops->get_policy(vma, vma->vm_start);
if (pol && (pol->flags & MPOL_F_MOF))
ret = true;
- mpol_cond_put(pol);
+ mpol_put(pol);
return ret;
}
@@ -1828,8 +1826,10 @@ bool vma_policy_mof(struct vm_area_struct *vma)
pol = vma->vm_policy;
if (!pol)
pol = get_task_policy(current);
- return pol->flags & MPOL_F_MOF;
+ ret = pol->flags & MPOL_F_MOF;
+ mpol_put(pol);
+ return ret;
}
bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
@@ -2193,7 +2192,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned nid;
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
- mpol_cond_put(pol);
+ mpol_put(pol);
gfp |= __GFP_COMP;
page = alloc_page_interleave(gfp, order, nid);
if (page && order > 1)
@@ -2208,7 +2207,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
node = policy_node(gfp, pol, node);
gfp |= __GFP_COMP;
page = alloc_pages_preferred_many(gfp, order, node, pol);
- mpol_cond_put(pol);
+ mpol_put(pol);
if (page && order > 1)
prep_transhuge_page(page);
folio = (struct folio *)page;
@@ -2233,7 +2232,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
- mpol_cond_put(pol);
+ mpol_put(pol);
/*
* First, try to allocate THP only on local node, but
* don't reclaim unnecessarily, just compact.
@@ -2258,7 +2257,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
nmask = policy_nodemask(gfp, pol);
preferred_nid = policy_node(gfp, pol, node);
folio = __folio_alloc(gfp, order, preferred_nid, nmask);
- mpol_cond_put(pol);
+ mpol_put(pol);
out:
return folio;
}
@@ -2300,6 +2299,7 @@ struct page *alloc_pages(gfp_t gfp, unsigned order)
policy_node(gfp, pol, numa_node_id()),
policy_nodemask(gfp, pol));
+ mpol_put(pol);
return page;
}
EXPORT_SYMBOL(alloc_pages);
@@ -2566,7 +2566,7 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
static void sp_free(struct sp_node *n)
{
- mpol_put(n->policy);
+ mpol_kill(n->policy);
kmem_cache_free(sn_cache, n);
}
@@ -2655,7 +2655,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
if (curnid != polnid)
ret = polnid;
out:
- mpol_cond_put(pol);
+ mpol_put(pol);
return ret;
}
@@ -2674,7 +2674,7 @@ void mpol_put_task_policy(struct task_struct *task)
pol = task->mempolicy;
task->mempolicy = NULL;
task_unlock(task);
- mpol_put(pol);
+ mpol_kill(pol);
}
static void sp_delete(struct shared_policy *sp, struct sp_node *n)
@@ -2763,7 +2763,7 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
err_out:
if (mpol_new)
- mpol_put(mpol_new);
+ mpol_kill(mpol_new);
if (n_new)
kmem_cache_free(sn_cache, n_new);
@@ -2823,7 +2823,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
put_new:
- mpol_put(new); /* drop initial ref */
+ mpol_kill(new); /* drop initial ref */
free_scratch:
NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
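--- a/mm/mmap.c
+++ b/mm/mmap.c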
@@ -140,7 +140,7 @@ static void remove_vma(struct vm_area_struct *vma)
vma->vm_ops->close(vma);
if (vma->vm_file)
fput(vma->vm_file);
- mpol_put(vma_policy(vma));
+ mpol_kill(vma_policy(vma));
vm_area_free(vma);
}
@@ -595,7 +595,7 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
- mpol_put(vma_policy(next));
+ mpol_kill(vma_policy(next));
vm_area_free(next);
}
@@ -836,7 +836,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
- mpol_put(vma_policy(next));
+ mpol_kill(vma_policy(next));
if (remove_next != 2)
BUG_ON(vma->vm_end < next->vm_end);
vm_area_free(next);
@@ -2253,7 +2253,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
fput(new->vm_file);
unlink_anon_vmas(new);
out_free_mpol:
- mpol_put(vma_policy(new));
+ mpol_kill(vma_policy(new));
out_free_vma:
vm_area_free(new);
validate_mm_mt(mm);
@@ -3246,7 +3246,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
unlink_anon_vmas(new_vma);
out_free_mempol:
- mpol_put(vma_policy(new_vma));
+ mpol_kill(vma_policy(new_vma));
out_free_vma:
vm_area_free(new_vma);
out:
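--- a/mm/shmem.c
+++ b/mm/shmem.c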
@@ -1485,7 +1485,7 @@ static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
{
/* Drop reference taken by mpol_shared_policy_lookup() */
- mpol_cond_put(vma->vm_policy);
+ mpol_put(vma->vm_policy);
}
static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp,
@@ -3528,7 +3528,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_mpol:
if (IS_ENABLED(CONFIG_NUMA)) {
- mpol_put(ctx->mpol);
+ mpol_kill(ctx->mpol);
ctx->mpol = NULL;
if (mpol_parse_str(param->string, &ctx->mpol))
goto bad_value;
@@ -3666,7 +3666,7 @@ static int shmem_reconfigure(struct fs_context *fc)
ctx->mpol = NULL;
}
raw_spin_unlock(&sbinfo->stat_lock);
- mpol_put(mpol);
+ mpol_kill(mpol);
return 0;
out:
raw_spin_unlock(&sbinfo->stat_lock);
@@ -3730,7 +3730,7 @@ static void shmem_put_super(struct super_block *sb)
free_percpu(sbinfo->ino_batch);
percpu_counter_destroy(&sbinfo->used_blocks);
- mpol_put(sbinfo->mpol);
+ mpol_kill(sbinfo->mpol);
kfree(sbinfo);
sb->s_fs_info = NULL;
}
@@ -3830,7 +3830,7 @@ static void shmem_free_fc(struct fs_context *fc)
struct shmem_options *ctx = fc->fs_private;
if (ctx) {
- mpol_put(ctx->mpol);
+ mpol_kill(ctx->mpol);
kfree(ctx);
}
}