// SPDX-License-Identifier: GPL-2.0
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "target.h"
#include "thread_map.h"
#include "tests.h"
#include "util/mmap.h"

#include <errno.h>
#include <signal.h>
#include <linux/string.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

static int exited;
static int nr_exit;

static void sig_handler(int sig __maybe_unused)
{
	exited = 1;
}

/*
 * perf_evlist__prepare_workload() will send a SIGUSR1 if the fork fails,
 * since we asked for that by setting its exec_error callback to this
 * handler.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info __maybe_unused,
					void *ucontext __maybe_unused)
{
	exited = 1;
	nr_exit = -1;
}

/*
 * This test starts a workload that does nothing and then checks that the
 * kernel reports exactly one exit event for it, i.e. that the number of
 * PERF_RECORD_EXIT records is correct.
 */
int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct evsel *evsel;
	struct evlist *evlist;
	struct target target = {
		.uid = UINT_MAX,
		.uses_mmap = true,
	};
	const char *argv[] = { "true", NULL };
	char sbuf[STRERR_BUFSIZE];
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	struct mmap *md;

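	/*
	 * The SIGCHLD handler above flags that the forked workload has
	 * exited; the read/poll loop below keeps going until it fires.
	 */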
	signal(SIGCHLD, sig_handler);

	evlist = perf_evlist__new_default();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new_default\n");
		return -1;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	cpus = perf_cpu_map__dummy_new();
	threads = thread_map__new_by_tid(-1);
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(&evlist->core, cpus, threads);

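	/*
	 * The evlist now owns the thread/cpu maps, so clear the local
	 * pointers: the out_free_maps path must not drop the reference
	 * a second time.
	 */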
	cpus = NULL;
	threads = NULL;

	err = perf_evlist__prepare_workload(evlist, &target, argv, false,
					    workload_exec_failed_signal);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

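	/*
	 * Configure the (single, default) event so that the kernel emits
	 * task (fork/exit) records, wakes up the poll below for every
	 * event (wakeup_events = 1) and skips kernel-side samples. s390x
	 * appears to need a much higher sample_freq here (hardware
	 * sampling constraints), hence the #ifdef.
	 */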
	evsel = evlist__first(evlist);
	evsel->core.attr.task = 1;
#ifdef __s390x__
	evsel->core.attr.sample_freq = 1000000;
#else
	evsel->core.attr.sample_freq = 1;
#endif
	evsel->core.attr.inherit = 0;
	evsel->core.attr.watermark = 0;
	evsel->core.attr.wakeup_events = 1;
	evsel->core.attr.exclude_kernel = 1;

	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("Couldn't open the evlist: %s\n",
			 str_error_r(-err, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

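	/* Map the ring buffer (128 pages) so the records can be read back. */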
	if (evlist__mmap(evlist, 128) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__start_workload(evlist);

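	/*
	 * Drain the ring buffer, counting PERF_RECORD_EXIT records, and
	 * keep polling until the workload has exited and at least one
	 * exit record has been seen (or the fork failed, which sets
	 * nr_exit to -1 and stops the loop as well).
	 */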
retry:
	md = &evlist->mmap[0];
	if (perf_mmap__read_init(&md->core) < 0)
		goto out_init;

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		if (event->header.type == PERF_RECORD_EXIT)
			nr_exit++;

		perf_mmap__consume(&md->core);
	}
	perf_mmap__read_done(&md->core);

out_init:
	if (!exited || !nr_exit) {
		evlist__poll(evlist, -1);
		goto retry;
	}

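	/* The single forked child must produce exactly one PERF_RECORD_EXIT. */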
	if (nr_exit != 1) {
		pr_debug("received %d EXIT records\n", nr_exit);
		err = -1;
	}

out_free_maps:
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
out_delete_evlist:
	evlist__delete(evlist);
	return err;
}