// SPDX-License-Identifier: GPL-2.0
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "tests.h"

#include <errno.h>
#include <signal.h>

static int exited;
static int nr_exit;

static void sig_handler(int sig __maybe_unused)
{
	exited = 1;
}

/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails,
 * since we asked for that by setting its exec_error to this handler.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info __maybe_unused,
					void *ucontext __maybe_unused)
{
	exited = 1;
	nr_exit = -1;
}

/*
 * This test starts a workload that does nothing ("true"), then checks
 * that the kernel reports exactly one PERF_RECORD_EXIT event for it.
 */
int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist;
	struct target target = {
		.uid		= UINT_MAX,
		.uses_mmap	= true,
	};
	const char *argv[] = { "true", NULL };
	char sbuf[STRERR_BUFSIZE];
	struct perf_cpu_map *cpus;
	struct thread_map *threads;
	struct perf_mmap *md;

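	/*
	 * The SIGCHLD from the exiting workload sets 'exited' and also
	 * interrupts the blocking perf_evlist__poll() call further below.
	 */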
	signal(SIGCHLD, sig_handler);

	evlist = perf_evlist__new_default();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new_default\n");
		return -1;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	cpus = cpu_map__dummy_new();
	threads = thread_map__new_by_tid(-1);
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	cpus	= NULL;
	threads = NULL;

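	/*
	 * Fork the workload; it waits until perf_evlist__start_workload()
	 * kicks it off. If the exec of "true" fails, a SIGUSR1 ends up in
	 * workload_exec_failed_signal(), which fails the test.
	 */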
	err = perf_evlist__prepare_workload(evlist, &target, argv, false,
					    workload_exec_failed_signal);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

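	/*
	 * Configure the single (default) event: ask for PERF_RECORD_EXIT
	 * records (attr.task), wake up the poll below on every event
	 * (watermark = 0, wakeup_events = 1), and stay out of the kernel.
	 */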
	evsel = perf_evlist__first(evlist);
	evsel->attr.task = 1;
#ifdef __s390x__
	evsel->attr.sample_freq = 1000000;
#else
	evsel->attr.sample_freq = 1;
#endif
	evsel->attr.inherit = 0;
	evsel->attr.watermark = 0;
	evsel->attr.wakeup_events = 1;
	evsel->attr.exclude_kernel = 1;

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("Couldn't open the evlist: %s\n",
			 str_error_r(-err, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

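	/* Map a ring buffer (128 pages) that the records are read from below. */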
	if (perf_evlist__mmap(evlist, 128) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__start_workload(evlist);

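	/*
	 * Drain the ring buffer, counting PERF_RECORD_EXIT records. Keep
	 * polling and retrying until the SIGCHLD handler has fired and at
	 * least one exit record has been seen (or the exec failed).
	 */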
retry:
	md = &evlist->mmap[0];
	if (perf_mmap__read_init(md) < 0)
		goto out_init;

	while ((event = perf_mmap__read_event(md)) != NULL) {
		if (event->header.type == PERF_RECORD_EXIT)
			nr_exit++;

		perf_mmap__consume(md);
	}
	perf_mmap__read_done(md);

out_init:
	if (!exited || !nr_exit) {
		perf_evlist__poll(evlist, -1);
		goto retry;
	}

	if (nr_exit != 1) {
		pr_debug("received %d EXIT records\n", nr_exit);
		err = -1;
	}

out_free_maps:
	cpu_map__put(cpus);
	thread_map__put(threads);
out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}