[RFC,bpf-next,06/13] bpf: Helpers to alloc and free object id in bpf namespace
Introduce generic helpers to allocate and free bpf_{map,prog,link} object ids in the bpf namespace. An object id carries one number per namespace level, so the same object is referenced by a different (virtual) id in each bpf namespace it is visible in.
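Roughly, the intended usage pattern is the following sketch (illustration only; the map->obj_id field and the call sites below are placeholders for this example and are not part of this patch):

	/* on object creation, allocate ids in the creating task's bpf namespace */
	struct bpf_obj_id *obj_id;

	obj_id = bpf_alloc_obj_id(current->nsproxy->bpf_ns, map, MAP_OBJ_ID);
	if (IS_ERR(obj_id))
		return PTR_ERR(obj_id);
	map->obj_id = obj_id;

	/* when reporting the id, use the one visible in current's namespace */
	id = bpf_obj_id_vnr(map->obj_id);

	/* on object destruction, drop the id from every namespace level */
	bpf_free_obj_id(map->obj_id, MAP_OBJ_ID);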
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
include/linux/bpf_namespace.h | 36 ++++++++++++++++++
kernel/bpf/bpf_namespace.c | 86 +++++++++++++++++++++++++++++++++++++++++++
kernel/bpf/syscall.c | 6 +--
3 files changed, 125 insertions(+), 3 deletions(-)
--- a/include/linux/bpf_namespace.h
+++ b/include/linux/bpf_namespace.h
@@ -38,9 +38,45 @@ struct bpf_namespace {
extern struct bpf_namespace init_bpf_ns;
extern struct proc_ns_operations bpfns_operations;
+extern spinlock_t map_idr_lock;
+extern spinlock_t prog_idr_lock;
+extern spinlock_t link_idr_lock;
struct bpf_namespace *copy_bpfns(unsigned long flags,
				 struct user_namespace *user_ns,
				 struct bpf_namespace *old_ns);
void put_bpfns(struct bpf_namespace *ns);
+struct bpf_obj_id *bpf_alloc_obj_id(struct bpf_namespace *ns,
+				    void *obj, int type);
+void bpf_free_obj_id(struct bpf_obj_id *obj_id, int type);
+
+/*
+ * Helpers to get a bpf_obj_id's id as seen from different namespaces:
+ *
+ * bpf_obj_id_nr()    : global id, i.e. the id seen from the init bpf namespace;
+ * bpf_obj_id_vnr()   : virtual id, i.e. the id seen from the bpf namespace of
+ *                      current;
+ * bpf_obj_id_nr_ns() : the id seen from the specified namespace.
+ *
+ * See also task_xid_nr() etc. in include/linux/sched.h.
+ */
+static inline int bpf_obj_id_nr(struct bpf_obj_id *obj_id)
+{
+	if (obj_id)
+		return obj_id->numbers[0].nr;
+	return 0;
+}
+
+static inline int bpf_obj_id_nr_ns(struct bpf_obj_id *obj_id,
+				   struct bpf_namespace *ns)
+{
+	if (obj_id && ns->level <= obj_id->level)
+		return obj_id->numbers[ns->level].nr;
+	return 0;
+}
+
+static inline int bpf_obj_id_vnr(struct bpf_obj_id *obj_id)
+{
+	return bpf_obj_id_nr_ns(obj_id, current->nsproxy->bpf_ns);
+}
#endif /* _LINUX_BPF_ID_NS_H */
--- a/kernel/bpf/bpf_namespace.c
+++ b/kernel/bpf/bpf_namespace.c
@@ -217,3 +217,89 @@ static __init int bpf_namespaces_init(void)
}
late_initcall(bpf_namespaces_init);
+
+struct bpf_obj_id *bpf_alloc_obj_id(struct bpf_namespace *ns,
+				    void *obj, int type)
+{
+	struct bpf_namespace *tmp = ns;
+	struct bpf_obj_id *obj_id;
+	spinlock_t *idr_lock;
+	unsigned long flags;
+	int id;
+	int i;
+
+	switch (type) {
+	case MAP_OBJ_ID:
+		idr_lock = &map_idr_lock;
+		break;
+	case PROG_OBJ_ID:
+		idr_lock = &prog_idr_lock;
+		break;
+	case LINK_OBJ_ID:
+		idr_lock = &link_idr_lock;
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	obj_id = kmem_cache_alloc(ns->obj_id_cachep, GFP_KERNEL);
+	if (!obj_id)
+		return ERR_PTR(-ENOMEM);
+
+	obj_id->level = ns->level;
+	for (i = ns->level; i >= 0; i--) {
+		idr_preload(GFP_KERNEL);
+		spin_lock_bh(idr_lock);
+		id = idr_alloc_cyclic(&tmp->idr[type], obj, 1, INT_MAX, GFP_ATOMIC);
+		spin_unlock_bh(idr_lock);
+		idr_preload_end();
+		if (id < 0)
+			goto out_free;
+		obj_id->numbers[i].nr = id;
+		obj_id->numbers[i].ns = tmp;
+		tmp = tmp->parent;
+	}
+
+	return obj_id;
+
+out_free:
+	for (i++; i <= ns->level; i++) {
+		tmp = obj_id->numbers[i].ns;
+		spin_lock_irqsave(idr_lock, flags);
+		idr_remove(&tmp->idr[type], obj_id->numbers[i].nr);
+		spin_unlock_irqrestore(idr_lock, flags);
+	}
+	kmem_cache_free(ns->obj_id_cachep, obj_id);
+	return ERR_PTR(id);
+}
+
+void bpf_free_obj_id(struct bpf_obj_id *obj_id, int type)
+{
+	struct bpf_namespace *ns;
+	spinlock_t *idr_lock;
+	unsigned long flags;
+	int i;
+
+	switch (type) {
+	case MAP_OBJ_ID:
+		idr_lock = &map_idr_lock;
+		break;
+	case PROG_OBJ_ID:
+		idr_lock = &prog_idr_lock;
+		break;
+	case LINK_OBJ_ID:
+		idr_lock = &link_idr_lock;
+		break;
+	default:
+		return;
+	}
+	/* Note that level 0 must be freed last. */
+	for (i = obj_id->level; i >= 0; i--) {
+		spin_lock_irqsave(idr_lock, flags);
+		ns = obj_id->numbers[i].ns;
+		idr_remove(&ns->idr[type], obj_id->numbers[i].nr);
+		spin_unlock_irqrestore(idr_lock, flags);
+	}
+	ns = obj_id->numbers[obj_id->level].ns;
+	kmem_cache_free(ns->obj_id_cachep, obj_id);
+}
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -48,11 +48,11 @@
DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
-static DEFINE_SPINLOCK(prog_idr_lock);
+DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
-static DEFINE_SPINLOCK(map_idr_lock);
+DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
-static DEFINE_SPINLOCK(link_idr_lock);
+DEFINE_SPINLOCK(link_idr_lock);
int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;