perf bench sched pipe: Add -G/--cgroups option

Message ID 20231004204741.985422-1-namhyung@kernel.org
State New
Headers
Series perf bench sched pipe: Add -G/--cgroups option |

Commit Message

Namhyung Kim Oct. 4, 2023, 8:47 p.m. UTC
  The -G/--cgroups option is to put sender and receiver in different
cgroups in order to measure cgroup context switch overheads.

Users need to make sure the cgroups exist and are accessible.

  # perf stat -e context-switches,cgroup-switches \
  > taskset -c 0 perf bench sched pipe -l 10000 > /dev/null

   Performance counter stats for 'taskset -c 0 perf bench sched pipe -l 10000':

              20,001      context-switches
                   2      cgroup-switches

         0.053449651 seconds time elapsed

         0.011286000 seconds user
         0.041869000 seconds sys

  # perf stat -e context-switches,cgroup-switches \
  > taskset -c 0 perf bench sched pipe -l 10000 -G AAA,BBB > /dev/null

   Performance counter stats for 'taskset -c 0 perf bench sched pipe -l 10000 -G AAA,BBB':

              20,001      context-switches
              20,001      cgroup-switches

         0.052768627 seconds time elapsed

         0.006284000 seconds user
         0.046266000 seconds sys

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 tools/perf/Documentation/perf-bench.txt | 19 +++++
 tools/perf/bench/sched-pipe.c           | 93 +++++++++++++++++++++++++
 2 files changed, 112 insertions(+)
  

Comments

Arnaldo Carvalho de Melo Oct. 13, 2023, 10:40 p.m. UTC | #1
Em Wed, Oct 04, 2023 at 01:47:41PM -0700, Namhyung Kim escreveu:
> The -G/--cgroups option is to put sender and receiver in different
> cgroups in order to measure cgroup context switch overheads.
> 
> Users need to make sure the cgroups exist and accessible.
> 
>   # perf stat -e context-switches,cgroup-switches \
>   > taskset -c 0 perf bench sched pipe -l 10000 > /dev/null
> 
>    Performance counter stats for 'taskset -c 0 perf bench sched pipe -l 10000':
> 
>               20,001      context-switches
>                    2      cgroup-switches
> 
>          0.053449651 seconds time elapsed
> 
>          0.011286000 seconds user
>          0.041869000 seconds sys
> 
>   # perf stat -e context-switches,cgroup-switches \
>   > taskset -c 0 perf bench sched pipe -l 10000 -G AAA,BBB > /dev/null
> 
>    Performance counter stats for 'taskset -c 0 perf bench sched pipe -l 10000 -G AAA,BBB':
> 
>               20,001      context-switches
>               20,001      cgroup-switches
> 
>          0.052768627 seconds time elapsed

So I tried with:

[root@quaco ~]# perf bench sched pipe -G system.slice,user.slice
# Running 'sched/pipe' benchmark:
cannot enter to cgroup: system.slice
cannot enter to cgroup: user.slice
# Executed 1000000 pipe operations between two processes

     Total time: 6.301 [sec]

       6.301478 usecs/op
         158692 ops/sec
[root@quaco ~]#

Shouldn't it bail out when not managing to enter the cgroups?

Also:

[root@quaco ~]# mkdir /sys/fs/cgroup/AAA
[root@quaco ~]# mkdir /sys/fs/cgroup/BBB
[root@quaco ~]#
[root@quaco ~]# perf bench sched pipe -G AAA,BBB
# Running 'sched/pipe' benchmark:
cannot enter to cgroup: AAA
cannot enter to cgroup: BBB
# Executed 1000000 pipe operations between two processes

     Total time: 6.397 [sec]

       6.397830 usecs/op
         156302 ops/sec
[root@quaco ~]#

-rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/AAA/cgroup.procs
[root@quaco ~]# ls -la /sys/fs/cgroup/AAA/cgroup.threads
-rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/AAA/cgroup.threads
[root@quaco ~]# ls -la /sys/fs/cgroup/BBB/cgroup.threads
-rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/BBB/cgroup.threads
[root@quaco ~]# ls -la /sys/fs/cgroup/BBB/cgroup.procs
-rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/BBB/cgroup.procs
[root@quaco ~]#

[root@quaco ~]# perf bench sched pipe -G AAA,BBB
# Running 'sched/pipe' benchmark:
cannot enter to cgroup: AAA (written=-1, len=6, errno=Operation not supported)
cannot enter to cgroup: BBB (written=-1, len=6, errno=Operation not supported)
# Executed 1000000 pipe operations between two processes

     Total time: 6.303 [sec]

       6.303221 usecs/op
         158649 ops/sec
[root@quaco ~]#

I'm certainly missing something here :-\

- Arnaldo
  
Namhyung Kim Oct. 13, 2023, 10:57 p.m. UTC | #2
Hi Arnaldo,

On Fri, Oct 13, 2023 at 3:41 PM Arnaldo Carvalho de Melo
<acme@kernel.org> wrote:
>
> Em Wed, Oct 04, 2023 at 01:47:41PM -0700, Namhyung Kim escreveu:
> > The -G/--cgroups option is to put sender and receiver in different
> > cgroups in order to measure cgroup context switch overheads.
> >
> > Users need to make sure the cgroups exist and accessible.
> >
> >   # perf stat -e context-switches,cgroup-switches \
> >   > taskset -c 0 perf bench sched pipe -l 10000 > /dev/null
> >
> >    Performance counter stats for 'taskset -c 0 perf bench sched pipe -l 10000':
> >
> >               20,001      context-switches
> >                    2      cgroup-switches
> >
> >          0.053449651 seconds time elapsed
> >
> >          0.011286000 seconds user
> >          0.041869000 seconds sys
> >
> >   # perf stat -e context-switches,cgroup-switches \
> >   > taskset -c 0 perf bench sched pipe -l 10000 -G AAA,BBB > /dev/null
> >
> >    Performance counter stats for 'taskset -c 0 perf bench sched pipe -l 10000 -G AAA,BBB':
> >
> >               20,001      context-switches
> >               20,001      cgroup-switches
> >
> >          0.052768627 seconds time elapsed
>
> So I tried with:
>
> [root@quaco ~]# perf bench sched pipe -G system.slice,user.slice
> # Running 'sched/pipe' benchmark:
> cannot enter to cgroup: system.slice
> cannot enter to cgroup: user.slice
> # Executed 1000000 pipe operations between two processes
>
>      Total time: 6.301 [sec]
>
>        6.301478 usecs/op
>          158692 ops/sec
> [root@quaco ~]#
>
> Should't it bail out when not managing to enter the cgroups?

Hmm.. right.  Will fix.

>
> Also:
>
> [root@quaco ~]# mkdir /sys/fs/cgroup/AAA
> [root@quaco ~]# mkdir /sys/fs/cgroup/BBB
> [root@quaco ~]#
> [root@quaco ~]# perf bench sched pipe -G AAA,BBB
> # Running 'sched/pipe' benchmark:
> cannot enter to cgroup: AAA
> cannot enter to cgroup: BBB
> # Executed 1000000 pipe operations between two processes
>
>      Total time: 6.397 [sec]
>
>        6.397830 usecs/op
>          156302 ops/sec
> [root@quaco ~]#
>
> -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/AAA/cgroup.procs
> [root@quaco ~]# ls -la /sys/fs/cgroup/AAA/cgroup.threads
> -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/AAA/cgroup.threads
> [root@quaco ~]# ls -la /sys/fs/cgroup/BBB/cgroup.threads
> -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/BBB/cgroup.threads
> [root@quaco ~]# ls -la /sys/fs/cgroup/BBB/cgroup.procs
> -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/BBB/cgroup.procs
> [root@quaco ~]#
>
> [root@quaco ~]# perf bench sched pipe -G AAA,BBB
> # Running 'sched/pipe' benchmark:
> cannot enter to cgroup: AAA (written=-1, len=6, errno=Operation not supported)
> cannot enter to cgroup: BBB (written=-1, len=6, errno=Operation not supported)
> # Executed 1000000 pipe operations between two processes
>
>      Total time: 6.303 [sec]
>
>        6.303221 usecs/op
>          158649 ops/sec
> [root@quaco ~]#
>
> I'm certainly missing something here :-\

Try to enable some cgroup controllers first.  Like

  # echo +cpu > /sys/fs/cgroup/AAA/cgroup.subtree_control
  # echo +cpu > /sys/fs/cgroup/BBB/cgroup.subtree_control

Thanks,
Namhyung
  
Arnaldo Carvalho de Melo Oct. 13, 2023, 11:03 p.m. UTC | #3
Em Fri, Oct 13, 2023 at 03:57:27PM -0700, Namhyung Kim escreveu:
> Hi Arnaldo,
> 
> On Fri, Oct 13, 2023 at 3:41 PM Arnaldo Carvalho de Melo
> <acme@kernel.org> wrote:
> >
> > Em Wed, Oct 04, 2023 at 01:47:41PM -0700, Namhyung Kim escreveu:
> > > The -G/--cgroups option is to put sender and receiver in different
> > > cgroups in order to measure cgroup context switch overheads.
> > >
> > > Users need to make sure the cgroups exist and accessible.
> > >
> > >   # perf stat -e context-switches,cgroup-switches \
> > >   > taskset -c 0 perf bench sched pipe -l 10000 > /dev/null
> > >
> > >    Performance counter stats for 'taskset -c 0 perf bench sched pipe -l 10000':
> > >
> > >               20,001      context-switches
> > >                    2      cgroup-switches
> > >
> > >          0.053449651 seconds time elapsed
> > >
> > >          0.011286000 seconds user
> > >          0.041869000 seconds sys
> > >
> > >   # perf stat -e context-switches,cgroup-switches \
> > >   > taskset -c 0 perf bench sched pipe -l 10000 -G AAA,BBB > /dev/null
> > >
> > >    Performance counter stats for 'taskset -c 0 perf bench sched pipe -l 10000 -G AAA,BBB':
> > >
> > >               20,001      context-switches
> > >               20,001      cgroup-switches
> > >
> > >          0.052768627 seconds time elapsed
> >
> > So I tried with:
> >
> > [root@quaco ~]# perf bench sched pipe -G system.slice,user.slice
> > # Running 'sched/pipe' benchmark:
> > cannot enter to cgroup: system.slice
> > cannot enter to cgroup: user.slice
> > # Executed 1000000 pipe operations between two processes
> >
> >      Total time: 6.301 [sec]
> >
> >        6.301478 usecs/op
> >          158692 ops/sec
> > [root@quaco ~]#
> >
> > Should't it bail out when not managing to enter the cgroups?
> 
> Hmm.. right.  Will fix.
> 
> >
> > Also:
> >
> > [root@quaco ~]# mkdir /sys/fs/cgroup/AAA
> > [root@quaco ~]# mkdir /sys/fs/cgroup/BBB
> > [root@quaco ~]#
> > [root@quaco ~]# perf bench sched pipe -G AAA,BBB
> > # Running 'sched/pipe' benchmark:
> > cannot enter to cgroup: AAA
> > cannot enter to cgroup: BBB
> > # Executed 1000000 pipe operations between two processes
> >
> >      Total time: 6.397 [sec]
> >
> >        6.397830 usecs/op
> >          156302 ops/sec
> > [root@quaco ~]#
> >
> > -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/AAA/cgroup.procs
> > [root@quaco ~]# ls -la /sys/fs/cgroup/AAA/cgroup.threads
> > -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/AAA/cgroup.threads
> > [root@quaco ~]# ls -la /sys/fs/cgroup/BBB/cgroup.threads
> > -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/BBB/cgroup.threads
> > [root@quaco ~]# ls -la /sys/fs/cgroup/BBB/cgroup.procs
> > -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/BBB/cgroup.procs
> > [root@quaco ~]#
> >
> > [root@quaco ~]# perf bench sched pipe -G AAA,BBB
> > # Running 'sched/pipe' benchmark:
> > cannot enter to cgroup: AAA (written=-1, len=6, errno=Operation not supported)
> > cannot enter to cgroup: BBB (written=-1, len=6, errno=Operation not supported)
> > # Executed 1000000 pipe operations between two processes
> >
> >      Total time: 6.303 [sec]
> >
> >        6.303221 usecs/op
> >          158649 ops/sec
> > [root@quaco ~]#
> >
> > I'm certainly missing something here :-\
> 
> Try to enable some cgroup controllers first.  Like
> 
>   # echo +cpu > /sys/fs/cgroup/AAA/cgroup.subtree_control
>   # echo +cpu > /sys/fs/cgroup/BBB/cgroup.subtree_control

[root@quaco cgroup]# echo +cpu > /sys/fs/cgroup/AAA/cgroup.subtree_control
[root@quaco cgroup]# echo +cpu > /sys/fs/cgroup/BBB/cgroup.subtree_control
[root@quaco cgroup]# perf bench sched pipe -G AAA,BBB
# Running 'sched/pipe' benchmark:
cannot enter to cgroup: AAA (written=-1, len=6, errno=Operation not supported)
cannot enter to cgroup: BBB (written=-1, len=6, errno=Operation not supported)
# Executed 1000000 pipe operations between two processes

     Total time: 6.267 [sec]

       6.267680 usecs/op
         159548 ops/sec
[root@quaco cgroup]# cat /sys/fs/cgroup/AAA/cgroup.subtree_control
cpu
[root@quaco cgroup]# cat /sys/fs/cgroup/BBB/cgroup.subtree_control
cpu
[root@quaco cgroup]# uname -a
Linux quaco 6.5.6-200.fc38.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Oct  6 19:02:35 UTC 2023 x86_64 GNU/Linux
[root@quaco cgroup]#
  
Namhyung Kim Oct. 13, 2023, 11:10 p.m. UTC | #4
On Fri, Oct 13, 2023 at 4:03 PM Arnaldo Carvalho de Melo
<acme@kernel.org> wrote:
>
> Em Fri, Oct 13, 2023 at 03:57:27PM -0700, Namhyung Kim escreveu:
> > Hi Arnaldo,
> >
> > On Fri, Oct 13, 2023 at 3:41 PM Arnaldo Carvalho de Melo
> > <acme@kernel.org> wrote:
> > > -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/AAA/cgroup.procs
> > > [root@quaco ~]# ls -la /sys/fs/cgroup/AAA/cgroup.threads
> > > -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/AAA/cgroup.threads
> > > [root@quaco ~]# ls -la /sys/fs/cgroup/BBB/cgroup.threads
> > > -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/BBB/cgroup.threads
> > > [root@quaco ~]# ls -la /sys/fs/cgroup/BBB/cgroup.procs
> > > -rw-r--r--. 1 root root 0 Oct 13 19:22 /sys/fs/cgroup/BBB/cgroup.procs
> > > [root@quaco ~]#
> > >
> > > [root@quaco ~]# perf bench sched pipe -G AAA,BBB
> > > # Running 'sched/pipe' benchmark:
> > > cannot enter to cgroup: AAA (written=-1, len=6, errno=Operation not supported)
> > > cannot enter to cgroup: BBB (written=-1, len=6, errno=Operation not supported)
> > > # Executed 1000000 pipe operations between two processes
> > >
> > >      Total time: 6.303 [sec]
> > >
> > >        6.303221 usecs/op
> > >          158649 ops/sec
> > > [root@quaco ~]#
> > >
> > > I'm certainly missing something here :-\
> >
> > Try to enable some cgroup controllers first.  Like
> >
> >   # echo +cpu > /sys/fs/cgroup/AAA/cgroup.subtree_control
> >   # echo +cpu > /sys/fs/cgroup/BBB/cgroup.subtree_control
>
> [root@quaco cgroup]# echo +cpu > /sys/fs/cgroup/AAA/cgroup.subtree_control
> [root@quaco cgroup]# echo +cpu > /sys/fs/cgroup/BBB/cgroup.subtree_control
> [root@quaco cgroup]# perf bench sched pipe -G AAA,BBB
> # Running 'sched/pipe' benchmark:
> cannot enter to cgroup: AAA (written=-1, len=6, errno=Operation not supported)
> cannot enter to cgroup: BBB (written=-1, len=6, errno=Operation not supported)
> # Executed 1000000 pipe operations between two processes
>
>      Total time: 6.267 [sec]
>
>        6.267680 usecs/op
>          159548 ops/sec
> [root@quaco cgroup]# cat /sys/fs/cgroup/AAA/cgroup.subtree_control
> cpu
> [root@quaco cgroup]# cat /sys/fs/cgroup/BBB/cgroup.subtree_control
> cpu
> [root@quaco cgroup]# uname -a
> Linux quaco 6.5.6-200.fc38.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Oct  6 19:02:35 UTC 2023 x86_64 GNU/Linux
> [root@quaco cgroup]#

Hmm... I don't know, I've tested it on v1 only ;-p  I'll take a look.

Also I found a bug, will fix too.

Thanks,
Namhyung


diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 1103fd658d5d..25236f339b90 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -118,9 +118,9 @@ static void enter_cgroup(struct cgroup *cgrp)

        /* try cgroup v2 interface first */
        if (threaded)
-               fd = openat(cgrp->fd, "cgroup.procs", O_WRONLY);
-       else
                fd = openat(cgrp->fd, "cgroup.threads", O_WRONLY);
+       else
+               fd = openat(cgrp->fd, "cgroup.progs", O_WRONLY);

        /* try cgroup v1 if failed */
        if (fd < 0)
  
Namhyung Kim Oct. 13, 2023, 11:20 p.m. UTC | #5
On Fri, Oct 13, 2023 at 4:10 PM Namhyung Kim <namhyung@kernel.org> wrote:
> diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
> index 1103fd658d5d..25236f339b90 100644
> --- a/tools/perf/bench/sched-pipe.c
> +++ b/tools/perf/bench/sched-pipe.c
> @@ -118,9 +118,9 @@ static void enter_cgroup(struct cgroup *cgrp)
>
>         /* try cgroup v2 interface first */
>         if (threaded)
> -               fd = openat(cgrp->fd, "cgroup.procs", O_WRONLY);
> -       else
>                 fd = openat(cgrp->fd, "cgroup.threads", O_WRONLY);
> +       else
> +               fd = openat(cgrp->fd, "cgroup.progs", O_WRONLY);
>
>         /* try cgroup v1 if failed */
>         if (fd < 0)

Oh, actually it should be 'procs' instead of 'progs'.

Thanks,
Namhyung
  

Patch

diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index ca5789625cd2..8331bd28b10e 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -124,6 +124,14 @@  Options of *pipe*
 --loop=::
 Specify number of loops.
 
+-G::
+--cgroups=::
+Names of cgroups for sender and receiver, separated by a comma.
+This is useful to check cgroup context switching overhead.
+Note that perf doesn't create or delete the cgroups, so users should
+make sure that the cgroups exist and are accessible before use.
+
+
 Example of *pipe*
 ^^^^^^^^^^^^^^^^^
 
@@ -141,6 +149,17 @@  Example of *pipe*
         Total time:0.016 sec
                 16.948000 usecs/op
                 59004 ops/sec
+
+% perf bench sched pipe -G AAA,BBB
+(executing 1000000 pipe operations between cgroups)
+# Running 'sched/pipe' benchmark:
+# Executed 1000000 pipe operations between two processes
+
+     Total time: 6.886 [sec]
+
+       6.886208 usecs/op
+         145217 ops/sec
+
 ---------------------
 
 SUITES FOR 'syscall'
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index a960e7a93aec..1103fd658d5d 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -11,6 +11,7 @@ 
  */
 #include <subcmd/parse-options.h>
 #include "bench.h"
+#include "util/cgroup.h"
 
 #include <unistd.h>
 #include <stdio.h>
@@ -19,6 +20,7 @@ 
 #include <sys/wait.h>
 #include <string.h>
 #include <errno.h>
+#include <fcntl.h>
 #include <assert.h>
 #include <sys/time.h>
 #include <sys/types.h>
@@ -40,9 +42,55 @@  static	int			loops = LOOPS_DEFAULT;
 /* Use processes by default: */
 static bool			threaded;
 
+static struct cgroup *cgrp_send = NULL;
+static struct cgroup *cgrp_recv = NULL;
+
+static int parse_two_cgroups(const struct option *opt __maybe_unused,
+			     const char *str, int unset __maybe_unused)
+{
+	char *p = strdup(str);
+	char *q;
+	int ret = -1;
+
+	if (p == NULL) {
+		fprintf(stderr, "memory allocation failure");
+		return -1;
+	}
+
+	q = strchr(p, ',');
+	if (q == NULL) {
+		fprintf(stderr, "it should have two cgroup names: %s", p);
+		goto out;
+	}
+	*q = '\0';
+
+	cgrp_send = cgroup__new(p, /*do_open=*/true);
+	if (cgrp_send == NULL) {
+		fprintf(stderr, "cannot open sender cgroup: %s", p);
+		goto out;
+	}
+
+	/* skip ',' */
+	q++;
+
+	cgrp_recv = cgroup__new(q, /*do_open=*/true);
+	if (cgrp_recv == NULL) {
+		fprintf(stderr, "cannot open receiver cgroup: %s", q);
+		goto out;
+	}
+	ret = 0;
+
+out:
+	free(p);
+	return ret;
+}
+
 static const struct option options[] = {
 	OPT_INTEGER('l', "loop",	&loops,		"Specify number of loops"),
 	OPT_BOOLEAN('T', "threaded",	&threaded,	"Specify threads/process based task setup"),
+	OPT_CALLBACK('G', "cgroups", NULL, "SEND,RECV",
+		     "Put sender and receivers in given cgroups",
+		     parse_two_cgroups),
 	OPT_END()
 };
 
@@ -51,12 +99,54 @@  static const char * const bench_sched_pipe_usage[] = {
 	NULL
 };
 
+static void enter_cgroup(struct cgroup *cgrp)
+{
+	char buf[32];
+	int fd, len;
+	pid_t pid;
+
+	if (cgrp == NULL)
+		return;
+
+	if (threaded)
+		pid = syscall(__NR_gettid);
+	else
+		pid = getpid();
+
+	snprintf(buf, sizeof(buf), "%d\n", pid);
+	len = strlen(buf);
+
+	/* try cgroup v2 interface first */
+	if (threaded)
+		fd = openat(cgrp->fd, "cgroup.procs", O_WRONLY);
+	else
+		fd = openat(cgrp->fd, "cgroup.threads", O_WRONLY);
+
+	/* try cgroup v1 if failed */
+	if (fd < 0)
+		fd = openat(cgrp->fd, "tasks", O_WRONLY);
+
+	if (fd < 0) {
+		printf("failed to open cgroup file in %s\n", cgrp->name);
+		return;
+	}
+
+	if (write(fd, buf, len) != len)
+		printf("cannot enter to cgroup: %s\n", cgrp->name);
+	close(fd);
+}
+
 static void *worker_thread(void *__tdata)
 {
 	struct thread_data *td = __tdata;
 	int m = 0, i;
 	int ret;
 
+	if (td->nr)
+		enter_cgroup(cgrp_send);
+	else
+		enter_cgroup(cgrp_recv);
+
 	for (i = 0; i < loops; i++) {
 		if (!td->nr) {
 			ret = read(td->pipe_read, &m, sizeof(int));
@@ -147,6 +237,9 @@  int bench_sched_pipe(int argc, const char **argv)
 	gettimeofday(&stop, NULL);
 	timersub(&stop, &start, &diff);
 
+	cgroup__put(cgrp_send);
+	cgroup__put(cgrp_recv);
+
 	switch (bench_format) {
 	case BENCH_FORMAT_DEFAULT:
 		printf("# Executed %d pipe operations between two %s\n\n",