[RFC,5/5] perf: Add mmap to the sideband ioctl

Message ID 20230414082300.34798-6-adrian.hunter@intel.com
State New
Series perf: Add ioctl to emit sideband events

Commit Message

Adrian Hunter April 14, 2023, 8:23 a.m. UTC
  Support the case of output to an active event, and return an error if
output is not possible in that case. Set PERF_RECORD_MISC_STATUS_ONLY to
differentiate the ioctl's status-only sideband events from "real" sideband
events.

Set the mmap event pid/tid from the appropriate task instead of from current.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
---
 kernel/events/core.c | 91 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 73 insertions(+), 18 deletions(-)
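
Not part of the patch: a minimal sketch of how a consumer of the ring buffer
could tell these ioctl-generated status snapshots apart from "real" mmap
sideband records. It assumes PERF_RECORD_MISC_STATUS_ONLY is exposed via the
updated uapi perf_event.h earlier in this series; the helper name is
illustrative only.

#include <linux/perf_event.h>
#include <stdbool.h>

/*
 * Illustrative only: an MMAP/MMAP2 record carrying
 * PERF_RECORD_MISC_STATUS_ONLY was emitted by the sideband ioctl as a
 * status snapshot, not by a live mmap() in the target task.
 */
static bool mmap_record_is_status_only(const struct perf_event_header *hdr)
{
	if (hdr->type != PERF_RECORD_MMAP && hdr->type != PERF_RECORD_MMAP2)
		return false;

	return hdr->misc & PERF_RECORD_MISC_STATUS_ONLY;
}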
  

Patch

diff --git a/kernel/events/core.c b/kernel/events/core.c
index cddc02c2e411..317bdf5f919a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8584,6 +8584,7 @@  static void perf_event_cgroup(struct cgroup *cgrp)
 
 struct perf_mmap_event {
 	struct vm_area_struct	*vma;
+	struct task_struct	*task;
 
 	const char		*file_name;
 	int			file_size;
@@ -8605,19 +8606,25 @@  struct perf_mmap_event {
 	} event_id;
 };
 
+static int perf_event_mmap_match_vma(struct perf_event *event,
+				     struct vm_area_struct *vma)
+{
+	int executable = vma->vm_flags & VM_EXEC;
+
+	return (!executable && event->attr.mmap_data) ||
+	       (executable && (event->attr.mmap || event->attr.mmap2));
+}
+
 static int perf_event_mmap_match(struct perf_event *event,
 				 void *data)
 {
 	struct perf_mmap_event *mmap_event = data;
 	struct vm_area_struct *vma = mmap_event->vma;
-	int executable = vma->vm_flags & VM_EXEC;
 
-	return (!executable && event->attr.mmap_data) ||
-	       (executable && (event->attr.mmap || event->attr.mmap2));
+	return perf_event_mmap_match_vma(event, vma);
 }
 
-static void perf_event_mmap_output(struct perf_event *event,
-				   void *data)
+static int perf_event_mmap_output(struct perf_event *event, void *data)
 {
 	struct perf_mmap_event *mmap_event = data;
 	struct perf_output_handle handle;
@@ -8628,7 +8635,7 @@  static void perf_event_mmap_output(struct perf_event *event,
 	int ret;
 
 	if (!perf_event_mmap_match(event, data))
-		return;
+		return -ENOENT;
 
 	if (event->attr.mmap2) {
 		mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
@@ -8646,8 +8653,8 @@  static void perf_event_mmap_output(struct perf_event *event,
 	if (ret)
 		goto out;
 
-	mmap_event->event_id.pid = perf_event_pid(event, current);
-	mmap_event->event_id.tid = perf_event_tid(event, current);
+	mmap_event->event_id.pid = perf_event_pid(event, mmap_event->task);
+	mmap_event->event_id.tid = perf_event_tid(event, mmap_event->task);
 
 	use_build_id = event->attr.build_id && mmap_event->build_id_size;
 
@@ -8681,9 +8688,10 @@  static void perf_event_mmap_output(struct perf_event *event,
 out:
 	mmap_event->event_id.header.size = size;
 	mmap_event->event_id.header.type = type;
+	return ret;
 }
 
-static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+static int perf_event_mmap_event(struct perf_mmap_event *mmap_event, struct perf_event *event)
 {
 	struct vm_area_struct *vma = mmap_event->vma;
 	struct file *file = vma->vm_file;
@@ -8694,6 +8702,7 @@  static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 	char tmp[16];
 	char *buf = NULL;
 	char *name;
+	int ret;
 
 	if (vma->vm_flags & VM_READ)
 		prot |= PROT_READ;
@@ -8795,11 +8804,10 @@  static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 	if (atomic_read(&nr_build_id_events))
 		build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size);
 
-	perf_iterate_sb(perf_event_mmap_output,
-		       mmap_event,
-		       NULL);
+	ret = perf_output_sb(perf_event_mmap_output, mmap_event, NULL, event);
 
 	kfree(buf);
+	return ret;
 }
 
 /*
@@ -8899,21 +8907,25 @@  static void perf_addr_filters_adjust(struct vm_area_struct *vma)
 	rcu_read_unlock();
 }
 
-void perf_event_mmap(struct vm_area_struct *vma)
+static int __perf_event_mmap(struct vm_area_struct *vma,
+			     struct perf_event *event,
+			     struct task_struct *task)
 {
 	struct perf_mmap_event mmap_event;
 
 	if (!atomic_read(&nr_mmap_events))
-		return;
+		return -ENOENT;
 
 	mmap_event = (struct perf_mmap_event){
 		.vma	= vma,
+		.task	= task ?: current,
 		/* .file_name */
 		/* .file_size */
 		.event_id  = {
 			.header = {
 				.type = PERF_RECORD_MMAP,
-				.misc = PERF_RECORD_MISC_USER,
+				.misc = PERF_RECORD_MISC_USER |
+					(event ? PERF_RECORD_MISC_STATUS_ONLY : 0),
 				/* .size */
 			},
 			/* .pid */
@@ -8930,8 +8942,14 @@  void perf_event_mmap(struct vm_area_struct *vma)
 		/* .flags (attr_mmap2 only) */
 	};
 
-	perf_addr_filters_adjust(vma);
-	perf_event_mmap_event(&mmap_event);
+	if (!event)
+		perf_addr_filters_adjust(vma);
+	return perf_event_mmap_event(&mmap_event, event);
+}
+
+void perf_event_mmap(struct vm_area_struct *vma)
+{
+	__perf_event_mmap(vma, NULL, NULL);
 }
 
 void perf_event_aux_event(struct perf_event *event, unsigned long head,
@@ -12901,9 +12919,46 @@  static int perf_event_emit_comm(struct perf_event *event, struct task_struct *ta
 	return __perf_event_comm(task, false, event);
 }
 
+static int perf_event_mm_emit_mmap(struct perf_event *event,
+				   struct task_struct *task,
+				   struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+	VMA_ITERATOR(vmi, mm, 0);
+	int err;
+
+	for_each_vma(vmi, vma) {
+		if (!perf_event_mmap_match_vma(event, vma))
+			continue;
+		err = __perf_event_mmap(vma, event, task);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int perf_event_emit_mmap(struct perf_event *event, struct task_struct *task)
 {
-	return -EINVAL;
+	struct mm_struct *mm;
+	int err;
+
+	if (!event->attr.mmap_data && !event->attr.mmap && !event->attr.mmap2)
+		return -EINVAL;
+
+	mm = get_task_mm(task);
+	if (!mm)
+		return 0;
+
+	mmap_read_lock(mm);
+
+	err = perf_event_mm_emit_mmap(event, task, mm);
+
+	mmap_read_unlock(mm);
+
+	mmput(mm);
+
+	return err;
 }
 
 static int perf_event_emit_sideband(struct perf_event *event, void __user *arg)