blob: 12af4a1bb1a4510078a3dc71852c5e360bdea22a [file] [log] [blame]
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2018 Facebook */
3
4#include <uapi/linux/btf.h>
Alexei Starovoitov91cc1a92019-11-14 10:57:15 -08005#include <uapi/linux/bpf.h>
6#include <uapi/linux/bpf_perf_event.h>
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07007#include <uapi/linux/types.h>
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07008#include <linux/seq_file.h>
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07009#include <linux/compiler.h>
Martin KaFai Lau2667a262018-11-19 15:29:08 -080010#include <linux/ctype.h>
Martin KaFai Lau69b693f2018-04-18 15:55:57 -070011#include <linux/errno.h>
12#include <linux/slab.h>
Martin KaFai Lauf56a6532018-04-18 15:56:01 -070013#include <linux/anon_inodes.h>
14#include <linux/file.h>
Martin KaFai Lau69b693f2018-04-18 15:55:57 -070015#include <linux/uaccess.h>
16#include <linux/kernel.h>
Martin KaFai Lau78958fc2018-05-04 14:49:51 -070017#include <linux/idr.h>
Martin KaFai Lauf80442a2018-05-22 14:57:18 -070018#include <linux/sort.h>
Martin KaFai Lau69b693f2018-04-18 15:55:57 -070019#include <linux/bpf_verifier.h>
20#include <linux/btf.h>
Alexei Starovoitov91cc1a92019-11-14 10:57:15 -080021#include <linux/skmsg.h>
22#include <linux/perf_event.h>
23#include <net/sock.h>
Martin KaFai Lau69b693f2018-04-18 15:55:57 -070024
25/* BTF (BPF Type Format) is the meta data format which describes
 26 * the data types of BPF program/map. Hence, it basically focuses
 27 * on the C programming language, which modern BPF is primarily
 28 * using.
29 *
30 * ELF Section:
31 * ~~~~~~~~~~~
32 * The BTF data is stored under the ".BTF" ELF section
33 *
34 * struct btf_type:
35 * ~~~~~~~~~~~~~~~
36 * Each 'struct btf_type' object describes a C data type.
37 * Depending on the type it is describing, a 'struct btf_type'
38 * object may be followed by more data. F.e.
39 * To describe an array, 'struct btf_type' is followed by
40 * 'struct btf_array'.
41 *
42 * 'struct btf_type' and any extra data following it are
43 * 4 bytes aligned.
44 *
45 * Type section:
46 * ~~~~~~~~~~~~~
47 * The BTF type section contains a list of 'struct btf_type' objects.
48 * Each one describes a C type. Recall from the above section
49 * that a 'struct btf_type' object could be immediately followed by extra
 50 * data in order to describe some particular C types.
51 *
52 * type_id:
53 * ~~~~~~~
54 * Each btf_type object is identified by a type_id. The type_id
55 * is implicitly implied by the location of the btf_type object in
56 * the BTF type section. The first one has type_id 1. The second
57 * one has type_id 2...etc. Hence, an earlier btf_type has
58 * a smaller type_id.
59 *
60 * A btf_type object may refer to another btf_type object by using
61 * type_id (i.e. the "type" in the "struct btf_type").
62 *
63 * NOTE that we cannot assume any reference-order.
64 * A btf_type object can refer to an earlier btf_type object
65 * but it can also refer to a later btf_type object.
66 *
67 * For example, to describe "const void *". A btf_type
68 * object describing "const" may refer to another btf_type
69 * object describing "void *". This type-reference is done
70 * by specifying type_id:
71 *
72 * [1] CONST (anon) type_id=2
73 * [2] PTR (anon) type_id=0
74 *
75 * The above is the btf_verifier debug log:
76 * - Each line started with "[?]" is a btf_type object
77 * - [?] is the type_id of the btf_type object.
78 * - CONST/PTR is the BTF_KIND_XXX
79 * - "(anon)" is the name of the type. It just
80 * happens that CONST and PTR has no name.
81 * - type_id=XXX is the 'u32 type' in btf_type
82 *
83 * NOTE: "void" has type_id 0
84 *
85 * String section:
86 * ~~~~~~~~~~~~~~
87 * The BTF string section contains the names used by the type section.
 88 * Each string is referred to by an "offset" from the beginning of the
89 * string section.
90 *
91 * Each string is '\0' terminated.
92 *
93 * The first character in the string section must be '\0'
94 * which is used to mean 'anonymous'. Some btf_type may not
95 * have a name.
96 */
97
98/* BTF verification:
99 *
100 * To verify BTF data, two passes are needed.
101 *
102 * Pass #1
103 * ~~~~~~~
104 * The first pass is to collect all btf_type objects to
105 * an array: "btf->types".
106 *
107 * Depending on the C type that a btf_type is describing,
108 * a btf_type may be followed by extra data. We don't know
 109 * how many btf_types there are, and more importantly we don't
110 * know where each btf_type is located in the type section.
111 *
112 * Without knowing the location of each type_id, most verifications
113 * cannot be done. e.g. an earlier btf_type may refer to a later
114 * btf_type (recall the "const void *" above), so we cannot
115 * check this type-reference in the first pass.
116 *
117 * In the first pass, it still does some verifications (e.g.
118 * checking the name is a valid offset to the string section).
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700119 *
120 * Pass #2
121 * ~~~~~~~
122 * The main focus is to resolve a btf_type that is referring
123 * to another type.
124 *
125 * We have to ensure the referring type:
126 * 1) does exist in the BTF (i.e. in btf->types[])
127 * 2) does not cause a loop:
128 * struct A {
129 * struct B b;
130 * };
131 *
132 * struct B {
133 * struct A a;
134 * };
135 *
136 * btf_type_needs_resolve() decides if a btf_type needs
137 * to be resolved.
138 *
139 * The needs_resolve type implements the "resolve()" ops which
140 * essentially does a DFS and detects backedge.
141 *
142 * During resolve (or DFS), different C types have different
143 * "RESOLVED" conditions.
144 *
145 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
146 * members because a member is always referring to another
147 * type. A struct's member can be treated as "RESOLVED" if
148 * it is referring to a BTF_KIND_PTR. Otherwise, the
149 * following valid C struct would be rejected:
150 *
151 * struct A {
152 * int m;
153 * struct A *a;
154 * };
155 *
156 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
157 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
158 * detect a pointer loop, e.g.:
159 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
160 * ^ |
161 * +-----------------------------------------+
162 *
Martin KaFai Lau69b693f2018-04-18 15:55:57 -0700163 */
164
/* Bit <-> byte helpers for (bitfield) member offsets and sizes. */
#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))

/* Valid bits of btf_type.info and of the BTF_KIND_INT extra u32. */
#define BTF_INFO_MASK 0x8f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs and each has 16 members and
 * a few MB spaces for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)

/* Iterate a struct/union's members starting at index 'from'. */
#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

/* Iterate a DATASEC's btf_var_secinfo entries. */
#define for_each_vsi(i, struct_type, member)			\
	for (i = 0, member = btf_type_var_secinfo(struct_type);	\
	     i < btf_type_vlen(struct_type);			\
	     i++, member++)

/* Like for_each_vsi() but starting at index 'from'. */
#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)

/* Global id -> struct btf map, exposed to user space via BTF ids. */
DEFINE_IDR(btf_idr);
DEFINE_SPINLOCK(btf_idr_lock);
Martin KaFai Lau78958fc2018-05-04 14:49:51 -0700200
/* In-kernel representation of one loaded BTF blob. */
struct btf {
	void *data;		/* copy of the raw BTF blob */
	struct btf_type **types;	/* type_id -> type; [0] is btf_void */
	u32 *resolved_ids;	/* per-type resolved id (filled in pass #2) */
	u32 *resolved_sizes;	/* per-type resolved size (filled in pass #2) */
	const char *strings;	/* string section (points into 'data') */
	void *nohdr_data;	/* 'data' past the btf_header */
	struct btf_header hdr;	/* validated copy of the blob's header */
	u32 nr_types;		/* number of types, not counting void */
	u32 types_size;		/* capacity of the 'types' array */
	u32 data_size;		/* byte size of 'data' */
	refcount_t refcnt;
	u32 id;			/* handle in btf_idr, visible to user space */
	struct rcu_head rcu;	/* for RCU-deferred freeing */
};
216
/* Which of the two verification passes is running; see the
 * "BTF verification" comment above.
 */
enum verifier_phase {
	CHECK_META,	/* pass #1: per-type meta data checks */
	CHECK_TYPE,	/* pass #2: cross-type reference resolution */
};

/* One frame of the resolve (DFS) stack. */
struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;	/* member index to resume from */
};

/* Per-type state during the pass #2 DFS. */
enum visit_state {
	NOT_VISITED,
	VISITED,	/* resolution in progress (on the DFS stack) */
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

#define MAX_RESOLVE_DEPTH 32

/* Offset/length of one section (type or string) inside the blob. */
struct btf_sec_info {
	u32 off;
	u32 len;
};
248
/* Scratch state for one BTF verification run. */
struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;	/* enum visit_state per type_id */
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];	/* DFS stack */
	struct bpf_verifier_log log;
	u32 log_type_id;	/* type_id being reported in the log */
	u32 top_stack;		/* number of used entries in stack[] */
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};
259
/* Human-readable kind names, indexed by BTF_KIND_*, for log output. */
static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN] = "UNKNOWN",
	[BTF_KIND_INT] = "INT",
	[BTF_KIND_PTR] = "PTR",
	[BTF_KIND_ARRAY] = "ARRAY",
	[BTF_KIND_STRUCT] = "STRUCT",
	[BTF_KIND_UNION] = "UNION",
	[BTF_KIND_ENUM] = "ENUM",
	[BTF_KIND_FWD] = "FWD",
	[BTF_KIND_TYPEDEF] = "TYPEDEF",
	[BTF_KIND_VOLATILE] = "VOLATILE",
	[BTF_KIND_CONST] = "CONST",
	[BTF_KIND_RESTRICT] = "RESTRICT",
	[BTF_KIND_FUNC] = "FUNC",
	[BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
	[BTF_KIND_VAR] = "VAR",
	[BTF_KIND_DATASEC] = "DATASEC",
};
278
/* Per-kind callbacks; one instance per BTF_KIND_* in kind_ops[]. */
struct btf_kind_operations {
	/* pass #1 (CHECK_META) validation of the type's own data */
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	/* pass #2 (CHECK_TYPE) reference resolution for one DFS vertex */
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	/* validate this type used as a struct/union member */
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	/* as check_member, for members with the kflag (bitfield) encoding */
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	/* append kind-specific details to the verifier log */
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	/* pretty-print a value of this type into a seq_file */
	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offsets,
			 struct seq_file *m);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
/* Singleton representing "void" (type_id 0); never in the BTF blob. */
static struct btf_type btf_void;

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);
305
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700306static bool btf_type_is_modifier(const struct btf_type *t)
307{
308 /* Some of them is not strictly a C modifier
309 * but they are grouped into the same bucket
310 * for BTF concern:
311 * A type (t) that refers to another
312 * type through t->type AND its size cannot
313 * be determined without following the t->type.
314 *
315 * ptr does not fall into this bucket
316 * because its size is always sizeof(void *).
317 */
318 switch (BTF_INFO_KIND(t->info)) {
319 case BTF_KIND_TYPEDEF:
320 case BTF_KIND_VOLATILE:
321 case BTF_KIND_CONST:
322 case BTF_KIND_RESTRICT:
323 return true;
324 }
325
326 return false;
327}
328
Daniel Borkmann2824ecb2019-04-09 23:20:10 +0200329bool btf_type_is_void(const struct btf_type *t)
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700330{
Martin KaFai Laub47a0bd2018-11-19 15:29:06 -0800331 return t == &btf_void;
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700332}
333
Martin KaFai Laub47a0bd2018-11-19 15:29:06 -0800334static bool btf_type_is_fwd(const struct btf_type *t)
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700335{
Martin KaFai Laub47a0bd2018-11-19 15:29:06 -0800336 return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
337}
338
339static bool btf_type_nosize(const struct btf_type *t)
340{
Martin KaFai Lau2667a262018-11-19 15:29:08 -0800341 return btf_type_is_void(t) || btf_type_is_fwd(t) ||
342 btf_type_is_func(t) || btf_type_is_func_proto(t);
Martin KaFai Laub47a0bd2018-11-19 15:29:06 -0800343}
344
345static bool btf_type_nosize_or_null(const struct btf_type *t)
346{
347 return !t || btf_type_nosize(t);
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700348}
349
350/* union is only a special case of struct:
351 * all its offsetof(member) == 0
352 */
353static bool btf_type_is_struct(const struct btf_type *t)
354{
355 u8 kind = BTF_INFO_KIND(t->info);
356
357 return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
358}
359
Alexei Starovoitovd83525c2019-01-31 15:40:04 -0800360static bool __btf_type_is_struct(const struct btf_type *t)
361{
362 return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
363}
364
/* True for BTF_KIND_ARRAY. */
static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

/* True for BTF_KIND_VAR (a global/static variable description). */
static bool btf_type_is_var(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
}

/* True for BTF_KIND_DATASEC (an ELF data section holding VARs). */
static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}
379
Martin KaFai Lau27ae79972020-01-08 16:35:03 -0800380s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
381{
382 const struct btf_type *t;
383 const char *tname;
384 u32 i;
385
386 for (i = 1; i <= btf->nr_types; i++) {
387 t = btf->types[i];
388 if (BTF_INFO_KIND(t->info) != kind)
389 continue;
390
391 tname = btf_name_by_offset(btf, t->name_off);
392 if (!strcmp(tname, name))
393 return i;
394 }
395
396 return -ENOENT;
397}
398
399const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
400 u32 id, u32 *res_id)
401{
402 const struct btf_type *t = btf_type_by_id(btf, id);
403
404 while (btf_type_is_modifier(t)) {
405 id = t->type;
406 t = btf_type_by_id(btf, t->type);
407 }
408
409 if (res_id)
410 *res_id = id;
411
412 return t;
413}
414
415const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
416 u32 id, u32 *res_id)
417{
418 const struct btf_type *t;
419
420 t = btf_type_skip_modifiers(btf, id, NULL);
421 if (!btf_type_is_ptr(t))
422 return NULL;
423
424 return btf_type_skip_modifiers(btf, t->type, res_id);
425}
426
427const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
428 u32 id, u32 *res_id)
429{
430 const struct btf_type *ptype;
431
432 ptype = btf_type_resolve_ptr(btf, id, res_id);
433 if (ptype && btf_type_is_func_proto(ptype))
434 return ptype;
435
436 return NULL;
437}
438
Daniel Borkmann1dc92852019-04-09 23:20:09 +0200439/* Types that act only as a source, not sink or intermediate
440 * type when resolving.
441 */
442static bool btf_type_is_resolve_source_only(const struct btf_type *t)
443{
444 return btf_type_is_var(t) ||
445 btf_type_is_datasec(t);
446}
447
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700448/* What types need to be resolved?
449 *
450 * btf_type_is_modifier() is an obvious one.
451 *
452 * btf_type_is_struct() because its member refers to
453 * another type (through member->type).
Daniel Borkmann1dc92852019-04-09 23:20:09 +0200454 *
455 * btf_type_is_var() because the variable refers to
456 * another type. btf_type_is_datasec() holds multiple
457 * btf_type_is_var() types that need resolving.
458 *
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700459 * btf_type_is_array() because its element (array->type)
460 * refers to another type. Array can be thought of a
461 * special case of struct while array just has the same
462 * member-type repeated by array->nelems of times.
463 */
464static bool btf_type_needs_resolve(const struct btf_type *t)
465{
466 return btf_type_is_modifier(t) ||
Daniel Borkmann1dc92852019-04-09 23:20:09 +0200467 btf_type_is_ptr(t) ||
468 btf_type_is_struct(t) ||
469 btf_type_is_array(t) ||
470 btf_type_is_var(t) ||
471 btf_type_is_datasec(t);
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700472}
473
474/* t->size can be used */
475static bool btf_type_has_size(const struct btf_type *t)
476{
477 switch (BTF_INFO_KIND(t->info)) {
478 case BTF_KIND_INT:
479 case BTF_KIND_STRUCT:
480 case BTF_KIND_UNION:
481 case BTF_KIND_ENUM:
Daniel Borkmann1dc92852019-04-09 23:20:09 +0200482 case BTF_KIND_DATASEC:
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700483 return true;
484 }
485
486 return false;
487}
488
Martin KaFai Lau69b693f2018-04-18 15:55:57 -0700489static const char *btf_int_encoding_str(u8 encoding)
490{
491 if (encoding == 0)
492 return "(none)";
493 else if (encoding == BTF_INT_SIGNED)
494 return "SIGNED";
495 else if (encoding == BTF_INT_CHAR)
496 return "CHAR";
497 else if (encoding == BTF_INT_BOOL)
498 return "BOOL";
Martin KaFai Lau69b693f2018-04-18 15:55:57 -0700499 else
500 return "UNKN";
501}
502
Yonghong Song9d5f9f72018-12-15 22:13:51 -0800503static u32 btf_member_bit_offset(const struct btf_type *struct_type,
504 const struct btf_member *member)
505{
506 return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
507 : member->offset;
508}
509
/* BTF_KIND_INT: one extra u32 (bits/offset/encoding) follows btf_type. */
static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

/* BTF_KIND_ARRAY: 'struct btf_array' follows btf_type. */
static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

/* BTF_KIND_ENUM: an array of 'struct btf_enum' follows btf_type. */
static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}

/* BTF_KIND_VAR: 'struct btf_var' follows btf_type. */
static const struct btf_var *btf_type_var(const struct btf_type *t)
{
	return (const struct btf_var *)(t + 1);
}

/* BTF_KIND_DATASEC: an array of 'struct btf_var_secinfo' follows. */
static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
{
	return (const struct btf_var_secinfo *)(t + 1);
}

/* Look up the per-kind callbacks for this type. */
static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}
539
Mathieu Malaterre583c5312019-01-16 20:29:40 +0100540static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
Martin KaFai Lau69b693f2018-04-18 15:55:57 -0700541{
Martin KaFai Lauaea2f7b82018-05-22 14:57:20 -0700542 return BTF_STR_OFFSET_VALID(offset) &&
543 offset < btf->hdr.str_len;
Martin KaFai Lau69b693f2018-04-18 15:55:57 -0700544}
545
/* Is 'c' acceptable in a BTF name?  The first character must be a
 * letter or '_'; later characters may also be digits.  '.' is only
 * accepted when dot_ok is set (section names such as ".bss").
 */
static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
{
	if (c == '_')
		return true;
	if (c == '.')
		return dot_ok;

	return first ? isalpha(c) : isalnum(c);
}
556
557static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
Martin KaFai Lau2667a262018-11-19 15:29:08 -0800558{
559 /* offset must be valid */
560 const char *src = &btf->strings[offset];
561 const char *src_limit;
562
Daniel Borkmann1dc92852019-04-09 23:20:09 +0200563 if (!__btf_name_char_ok(*src, true, dot_ok))
Martin KaFai Lau2667a262018-11-19 15:29:08 -0800564 return false;
565
566 /* set a limit on identifier length */
567 src_limit = src + KSYM_NAME_LEN;
568 src++;
569 while (*src && src < src_limit) {
Daniel Borkmann1dc92852019-04-09 23:20:09 +0200570 if (!__btf_name_char_ok(*src, false, dot_ok))
Martin KaFai Lau2667a262018-11-19 15:29:08 -0800571 return false;
572 src++;
573 }
574
575 return !*src;
576}
577
Daniel Borkmann1dc92852019-04-09 23:20:09 +0200578/* Only C-style identifier is permitted. This can be relaxed if
579 * necessary.
580 */
581static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
582{
583 return __btf_name_valid(btf, offset, false);
584}
585
586static bool btf_name_valid_section(const struct btf *btf, u32 offset)
587{
588 return __btf_name_valid(btf, offset, true);
589}
590
Martin KaFai Lau23127b32018-12-13 10:41:46 -0800591static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
Martin KaFai Lau69b693f2018-04-18 15:55:57 -0700592{
Martin KaFai Lauaea2f7b82018-05-22 14:57:20 -0700593 if (!offset)
Martin KaFai Lau69b693f2018-04-18 15:55:57 -0700594 return "(anon)";
Martin KaFai Lauaea2f7b82018-05-22 14:57:20 -0700595 else if (offset < btf->hdr.str_len)
596 return &btf->strings[offset];
Martin KaFai Lau69b693f2018-04-18 15:55:57 -0700597 else
598 return "(invalid-name-offset)";
599}
600
Martin KaFai Lau23127b32018-12-13 10:41:46 -0800601const char *btf_name_by_offset(const struct btf *btf, u32 offset)
602{
603 if (offset < btf->hdr.str_len)
604 return &btf->strings[offset];
605
606 return NULL;
607}
608
Yonghong Song838e9692018-11-19 15:29:11 -0800609const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700610{
611 if (type_id > btf->nr_types)
612 return NULL;
613
614 return btf->types[type_id];
615}
616
Martin KaFai Lau4ef5f572018-05-22 14:57:19 -0700617/*
618 * Regular int is not a bit field and it must be either
Yonghong Songb1e88182019-01-15 17:07:47 -0800619 * u8/u16/u32/u64 or __int128.
Martin KaFai Lau4ef5f572018-05-22 14:57:19 -0700620 */
621static bool btf_type_int_is_regular(const struct btf_type *t)
622{
Martin KaFai Lau36fc3c82018-07-19 22:14:31 -0700623 u8 nr_bits, nr_bytes;
Martin KaFai Lau4ef5f572018-05-22 14:57:19 -0700624 u32 int_data;
625
626 int_data = btf_type_int(t);
627 nr_bits = BTF_INT_BITS(int_data);
628 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
629 if (BITS_PER_BYTE_MASKED(nr_bits) ||
630 BTF_INT_OFFSET(int_data) ||
631 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
Yonghong Songb1e88182019-01-15 17:07:47 -0800632 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
633 nr_bytes != (2 * sizeof(u64)))) {
Martin KaFai Lau4ef5f572018-05-22 14:57:19 -0700634 return false;
635 }
636
637 return true;
638}
639
/*
 * Check that given struct member is a regular int with expected
 * offset and size.
 */
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{
	const struct btf_type *t;
	u32 id, int_data;
	u8 nr_bits;

	/* resolve the member's type down to a (possibly wrapped) int */
	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	if (!t || !btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	if (btf_type_kflag(s)) {
		/* kflag encoding: m->offset packs bitfield size + offset */
		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);

		/* if kflag set, int should be a regular int and
		 * bit offset should be at byte boundary.
		 */
		return !bitfield_size &&
		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
	}

	/* non-kflag: reject any bitfield-like int and require the member
	 * to sit exactly at expected_offset with expected_size bytes
	 */
	if (BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(m->offset) ||
	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
	    BITS_PER_BYTE_MASKED(nr_bits) ||
	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
		return false;

	return true;
}
680
/* Unconditionally format 'fmt' into the verifier log. */
__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

/* Same, but skip all formatting work when no log was requested. */
__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}
704
/* Log one type as "[type_id] KIND name", optionally followed by
 * kind-specific details (log_details) and a printf-style message.
 */
__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	u8 kind = BTF_INFO_KIND(t->info);
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	/* btf verifier prints all types it is processing via
	 * btf_verifier_log_type(..., fmt = NULL).
	 * Skip those prints for in-kernel BTF verification.
	 */
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_kind_str[kind],
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

/* Convenience wrappers: with and without kind-specific details. */
#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
748
/* Log one struct/union member ("\tname type_id=... bits_offset=..."),
 * decoding the kflag (bitfield) layout of member->offset when present.
 */
__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	/* The CHECK_META phase already did a btf dump.
	 *
	 * If member is logged again, it must hit an error in
	 * parsing this member. It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}
794
/* Log one DATASEC var_secinfo entry, preceded by its datasec type when
 * we are past the CHECK_META phase (same rationale as log_member).
 */
__printf(4, 5)
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
				 const struct btf_type *datasec_type,
				 const struct btf_var_secinfo *vsi,
				 const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, datasec_type, NULL);

	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
			   vsi->type, vsi->offset, vsi->size);
	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}
822
/* Dump the (already validated and copied) btf_header fields plus the
 * total blob size to the verifier log.
 */
static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	/* in-kernel BTF verification keeps the log terse */
	if (log->level == BPF_LOG_KERNEL)
		return;
	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}
846
/* Append 't' as the next type in btf->types[], growing the array when
 * needed.  types[0] is reserved for btf_void, so real type_ids start
 * at 1 and nr_types does not count the void entry.
 * Returns 0 on success, -E2BIG/-ENOMEM on failure.
 */
static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	/* < 2 because +1 for btf_void which is always in btf->types[0].
	 * btf_void is not accounted in btf->nr_types because btf_void
	 * does not come from the BTF file.
	 */
	if (btf->types_size - btf->nr_types < 2) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		/* grow by 25% (at least 16 slots), capped at BTF_MAX_TYPE */
		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		/* copy slots 0..nr_types (incl. the void slot) over */
		if (btf->nr_types == 0)
			new_types[0] = &btf_void;
		else
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * (btf->nr_types + 1));

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[++(btf->nr_types)] = t;

	return 0;
}
890
Martin KaFai Lau78958fc2018-05-04 14:49:51 -0700891static int btf_alloc_id(struct btf *btf)
892{
893 int id;
894
895 idr_preload(GFP_KERNEL);
896 spin_lock_bh(&btf_idr_lock);
897 id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
898 if (id > 0)
899 btf->id = id;
900 spin_unlock_bh(&btf_idr_lock);
901 idr_preload_end();
902
903 if (WARN_ON_ONCE(!id))
904 return -ENOSPC;
905
906 return id > 0 ? 0 : id;
907}
908
909static void btf_free_id(struct btf *btf)
910{
911 unsigned long flags;
912
913 /*
914 * In map-in-map, calling map_delete_elem() on outer
915 * map will call bpf_map_put on the inner map.
916 * It will then eventually call btf_free_id()
917 * on the inner map. Some of the map_delete_elem()
918 * implementation may have irq disabled, so
919 * we need to use the _irqsave() version instead
920 * of the _bh() version.
921 */
922 spin_lock_irqsave(&btf_idr_lock, flags);
923 idr_remove(&btf_idr, btf->id);
924 spin_unlock_irqrestore(&btf_idr_lock, flags);
925}
926
/* Release all memory owned by @btf, then the btf object itself.
 * Only reached once the last reference is gone (see btf_put()).
 */
static void btf_free(struct btf *btf)
{
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
}
935
/* RCU callback scheduled by btf_put(): frees the btf after a grace
 * period has elapsed.
 */
static void btf_free_rcu(struct rcu_head *rcu)
{
	struct btf *btf = container_of(rcu, struct btf, rcu);

	btf_free(btf);
}
942
/* Drop a reference on @btf (NULL is tolerated).  On the last reference,
 * remove the id from the idr first, then defer the actual free to RCU
 * via btf_free_rcu().
 */
void btf_put(struct btf *btf)
{
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
	}
}
950
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700951static int env_resolve_init(struct btf_verifier_env *env)
952{
953 struct btf *btf = env->btf;
954 u32 nr_types = btf->nr_types;
955 u32 *resolved_sizes = NULL;
956 u32 *resolved_ids = NULL;
957 u8 *visit_states = NULL;
958
959 /* +1 for btf_void */
Kees Cook778e1cd2018-06-12 14:04:48 -0700960 resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700961 GFP_KERNEL | __GFP_NOWARN);
962 if (!resolved_sizes)
963 goto nomem;
964
Kees Cook778e1cd2018-06-12 14:04:48 -0700965 resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700966 GFP_KERNEL | __GFP_NOWARN);
967 if (!resolved_ids)
968 goto nomem;
969
Kees Cook778e1cd2018-06-12 14:04:48 -0700970 visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700971 GFP_KERNEL | __GFP_NOWARN);
972 if (!visit_states)
973 goto nomem;
974
975 btf->resolved_sizes = resolved_sizes;
976 btf->resolved_ids = resolved_ids;
977 env->visit_states = visit_states;
978
979 return 0;
980
981nomem:
982 kvfree(resolved_sizes);
983 kvfree(resolved_ids);
984 kvfree(visit_states);
985 return -ENOMEM;
986}
987
/* Free the verifier environment.  The resolved_sizes/resolved_ids
 * arrays are owned by the btf and are not freed here.
 */
static void btf_verifier_env_free(struct btf_verifier_env *env)
{
	kvfree(env->visit_states);
	kfree(env);
}
993
Martin KaFai Laueb3f5952018-04-18 15:55:58 -0700994static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
995 const struct btf_type *next_type)
996{
997 switch (env->resolve_mode) {
998 case RESOLVE_TBD:
999 /* int, enum or void is a sink */
1000 return !btf_type_needs_resolve(next_type);
1001 case RESOLVE_PTR:
Martin KaFai Lau2667a262018-11-19 15:29:08 -08001002 /* int, enum, void, struct, array, func or func_proto is a sink
1003 * for ptr
1004 */
Martin KaFai Laueb3f5952018-04-18 15:55:58 -07001005 return !btf_type_is_modifier(next_type) &&
1006 !btf_type_is_ptr(next_type);
1007 case RESOLVE_STRUCT_OR_ARRAY:
Martin KaFai Lau2667a262018-11-19 15:29:08 -08001008 /* int, enum, void, ptr, func or func_proto is a sink
1009 * for struct and array
1010 */
Martin KaFai Laueb3f5952018-04-18 15:55:58 -07001011 return !btf_type_is_modifier(next_type) &&
1012 !btf_type_is_array(next_type) &&
1013 !btf_type_is_struct(next_type);
1014 default:
Arnd Bergmann53c80362018-05-25 23:33:19 +02001015 BUG();
Martin KaFai Laueb3f5952018-04-18 15:55:58 -07001016 }
1017}
1018
/* True once @type_id has been fully resolved (as opposed to
 * NOT_VISITED or still VISITED on the resolve stack).
 */
static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{
	return env->visit_states[type_id] == RESOLVED;
}
1024
1025static int env_stack_push(struct btf_verifier_env *env,
1026 const struct btf_type *t, u32 type_id)
1027{
1028 struct resolve_vertex *v;
1029
1030 if (env->top_stack == MAX_RESOLVE_DEPTH)
1031 return -E2BIG;
1032
1033 if (env->visit_states[type_id] != NOT_VISITED)
1034 return -EEXIST;
1035
1036 env->visit_states[type_id] = VISITED;
1037
1038 v = &env->stack[env->top_stack++];
1039 v->t = t;
1040 v->type_id = type_id;
1041 v->next_member = 0;
1042
1043 if (env->resolve_mode == RESOLVE_TBD) {
1044 if (btf_type_is_ptr(t))
1045 env->resolve_mode = RESOLVE_PTR;
1046 else if (btf_type_is_struct(t) || btf_type_is_array(t))
1047 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1048 }
1049
1050 return 0;
1051}
1052
/* Record the resume point (next member index) on the vertex at the top
 * of the resolve stack.  Caller must ensure the stack is non-empty.
 */
static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{
	env->stack[env->top_stack - 1].next_member = next_member;
}
1058
1059static void env_stack_pop_resolved(struct btf_verifier_env *env,
1060 u32 resolved_type_id,
1061 u32 resolved_size)
1062{
1063 u32 type_id = env->stack[--(env->top_stack)].type_id;
1064 struct btf *btf = env->btf;
1065
1066 btf->resolved_sizes[type_id] = resolved_size;
1067 btf->resolved_ids[type_id] = resolved_type_id;
1068 env->visit_states[type_id] = RESOLVED;
1069}
1070
/* Return the top of the resolve stack without popping, or NULL when the
 * stack is empty.  ("peak" is a historical spelling of "peek".)
 */
static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{
	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}
1075
/* Resolve the size of a passed-in "type"
 *
 * type: is an array (e.g. u32 array[x][y])
 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
 * *type_size: (x * y * sizeof(u32)). Hence, *type_size always
 * corresponds to the return type.
 * *elem_type: u32
 * *total_nelems: (x * y). Hence, individual elem size is
 * (*type_size / *total_nelems)
 *
 * type: is not an array (e.g. const struct X)
 * return type: type "struct X"
 * *type_size: sizeof(struct X)
 * *elem_type: same as return type ("struct X")
 * *total_nelems: 1
 *
 * Returns ERR_PTR(-EINVAL) on a size overflow, a sizeless kind, or a
 * chain deeper than MAX_RESOLVE_DEPTH.
 */
static const struct btf_type *
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		 u32 *type_size, const struct btf_type **elem_type,
		 u32 *total_nelems)
{
	const struct btf_type *array_type = NULL;
	const struct btf_array *array;
	u32 i, size, nelems = 1;

	/* bounded walk: a malformed chain cannot loop forever */
	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
		switch (BTF_INFO_KIND(type->info)) {
		/* type->size can be used */
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
			size = type->size;
			goto resolved;

		case BTF_KIND_PTR:
			size = sizeof(void *);
			goto resolved;

		/* Modifiers */
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
			type = btf_type_by_id(btf, type->type);
			break;

		case BTF_KIND_ARRAY:
			/* remember the outermost array: it is the
			 * return value for multi-dimensional arrays
			 */
			if (!array_type)
				array_type = type;
			array = btf_type_array(type);
			/* guard nelems * array->nelems against u32 overflow */
			if (nelems && array->nelems > U32_MAX / nelems)
				return ERR_PTR(-EINVAL);
			nelems *= array->nelems;
			type = btf_type_by_id(btf, array->type);
			break;

		/* type without size */
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return ERR_PTR(-EINVAL);

resolved:
	/* guard nelems * size against u32 overflow */
	if (nelems && size > U32_MAX / nelems)
		return ERR_PTR(-EINVAL);

	*type_size = nelems * size;
	*total_nelems = nelems;
	*elem_type = type;

	return array_type ? : type;
}
1151
/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
						  u32 *type_id)
{
	/* Rewrite *type_id to the id recorded during resolution and
	 * return the corresponding type.
	 */
	*type_id = btf->resolved_ids[*type_id];
	return btf_type_by_id(btf, *type_id);
}
1159
/* Follow @*type_id to a type with a determinable size.  On success,
 * *type_id is updated to the sized type's id and *ret_size (if
 * non-NULL) receives the size in bytes.  Returns NULL when the chain
 * ends in a type without a size.
 */
const struct btf_type *btf_type_id_size(const struct btf *btf,
					u32 *type_id, u32 *ret_size)
{
	const struct btf_type *size_type;
	u32 size_type_id = *type_id;
	u32 size = 0;

	size_type = btf_type_by_id(btf, size_type_id);
	if (btf_type_nosize_or_null(size_type))
		return NULL;

	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		/* array sizes were precomputed during resolution */
		size = btf->resolved_sizes[size_type_id];
	} else if (btf_type_is_ptr(size_type)) {
		size = sizeof(void *);
	} else {
		/* only modifiers and vars may reach this branch;
		 * anything else is a verifier-internal inconsistency
		 */
		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
				 !btf_type_is_var(size_type)))
			return NULL;

		/* a single hop through resolved_ids reaches the
		 * fully-resolved target type
		 */
		size_type_id = btf->resolved_ids[size_type_id];
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_nosize_or_null(size_type))
			return NULL;
		else if (btf_type_has_size(size_type))
			size = size_type->size;
		else if (btf_type_is_array(size_type))
			size = btf->resolved_sizes[size_type_id];
		else if (btf_type_is_ptr(size_type))
			size = sizeof(void *);
		else
			return NULL;
	}

	*type_id = size_type_id;
	if (ret_size)
		*ret_size = size;

	return size_type;
}
1202
/* Default .check_member: used by kinds that can never appear as a
 * struct/union member; reaching it is always an error.
 */
static int btf_df_check_member(struct btf_verifier_env *env,
			       const struct btf_type *struct_type,
			       const struct btf_member *member,
			       const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_member");
	return -EINVAL;
}
1212
/* Default .check_kflag_member: kflag counterpart of
 * btf_df_check_member(); reaching it is always an error.
 */
static int btf_df_check_kflag_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_kflag_member");
	return -EINVAL;
}
1222
1223/* Used for ptr, array and struct/union type members.
1224 * int, enum and modifier types have their specific callback functions.
1225 */
1226static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1227 const struct btf_type *struct_type,
1228 const struct btf_member *member,
1229 const struct btf_type *member_type)
1230{
1231 if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
1232 btf_verifier_log_member(env, struct_type, member,
1233 "Invalid member bitfield_size");
1234 return -EINVAL;
1235 }
1236
1237 /* bitfield size is 0, so member->offset represents bit offset only.
1238 * It is safe to call non kflag check_member variants.
1239 */
1240 return btf_type_ops(member_type)->check_member(env, struct_type,
1241 member,
1242 member_type);
1243}
1244
/* Default .resolve: used by kinds that never need resolving; reaching
 * it is always an error.
 */
static int btf_df_resolve(struct btf_verifier_env *env,
			  const struct resolve_vertex *v)
{
	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
	return -EINVAL;
}
1251
/* Default .seq_show: prints a placeholder for kinds that have no data
 * representation of their own.
 */
static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
			    u32 type_id, void *data, u8 bits_offsets,
			    struct seq_file *m)
{
	seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
}
1258
/* Validate an int-typed struct/union member (no kind_flag): the int's
 * bits, including its intra-int offset, must fit inside the struct.
 */
static int btf_int_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 int_data = btf_type_int(member_type);
	u32 struct_bits_off = member->offset;
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;
	u32 bytes_offset;

	/* guard the addition below against u32 overflow */
	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
		btf_verifier_log_member(env, struct_type, member,
					"bits_offset exceeds U32_MAX");
		return -EINVAL;
	}

	struct_bits_off += BTF_INT_OFFSET(int_data);
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	nr_copy_bits = BTF_INT_BITS(int_data) +
		BITS_PER_BYTE_MASKED(struct_bits_off);

	/* the seq_show path copies at most 128 bits (u64[2]) */
	if (nr_copy_bits > BITS_PER_U128) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 128");
		return -EINVAL;
	}

	/* two-step comparison avoids unsigned underflow */
	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
1296
/* Validate an int-typed member when the struct has kind_flag set: the
 * member offset encodes both a bitfield size and a bit offset.
 */
static int btf_int_check_kflag_member(struct btf_verifier_env *env,
				      const struct btf_type *struct_type,
				      const struct btf_member *member,
				      const struct btf_type *member_type)
{
	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
	u32 int_data = btf_type_int(member_type);
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;

	/* a regular int type is required for the kflag int member */
	if (!btf_type_int_is_regular(member_type)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member base type");
		return -EINVAL;
	}

	/* check sanity of bitfield size */
	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
	nr_int_data_bits = BTF_INT_BITS(int_data);
	if (!nr_bits) {
		/* Not a bitfield member, member offset must be at byte
		 * boundary.
		 */
		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
			btf_verifier_log_member(env, struct_type, member,
						"Invalid member offset");
			return -EINVAL;
		}

		/* non-bitfield member occupies the int's full width */
		nr_bits = nr_int_data_bits;
	} else if (nr_bits > nr_int_data_bits) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
	/* the seq_show path copies at most 128 bits (u64[2]) */
	if (nr_copy_bits > BITS_PER_U128) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 128");
		return -EINVAL;
	}

	/* two-step comparison avoids unsigned underflow */
	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
1352
/* Validate the metadata of a BTF_KIND_INT type.  Returns the number of
 * extra metadata bytes consumed (sizeof(u32)) on success, or a negative
 * errno on malformed input.
 */
static s32 btf_int_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
	u16 encoding;

	/* BTF_KIND_INT is followed by exactly one u32 of int metadata */
	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	int_data = btf_type_int(t);
	/* reject bits outside the defined encoding/offset/bits fields */
	if (int_data & ~BTF_INT_MASK) {
		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
				       int_data);
		return -EINVAL;
	}

	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);

	/* the seq_show path copies at most 128 bits (u64[2]) */
	if (nr_bits > BITS_PER_U128) {
		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
				      BITS_PER_U128);
		return -EINVAL;
	}

	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
		return -EINVAL;
	}

	/*
	 * Only one of the encoding bits is allowed and it
	 * should be sufficient for the pretty print purpose (i.e. decoding).
	 * Multiple bits can be allowed later if it is found
	 * to be insufficient.
	 */
	encoding = BTF_INT_ENCODING(int_data);
	if (encoding &&
	    encoding != BTF_INT_SIGNED &&
	    encoding != BTF_INT_CHAR &&
	    encoding != BTF_INT_BOOL) {
		btf_verifier_log_type(env, t, "Unsupported encoding");
		return -ENOTSUPP;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
1416
1417static void btf_int_log(struct btf_verifier_env *env,
1418 const struct btf_type *t)
1419{
1420 int int_data = btf_type_int(t);
1421
1422 btf_verifier_log(env,
1423 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1424 t->size, BTF_INT_OFFSET(int_data),
1425 BTF_INT_BITS(int_data),
1426 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
1427}
1428
/* Print the 128-bit number at @data as hex, without leading zeroes in
 * the upper 64 bits.
 */
static void btf_int128_print(struct seq_file *m, void *data)
{
	/* data points to a __int128 number.
	 * Suppose
	 *     int128_num = *(__int128 *)data;
	 * The below formulas shows what upper_num and lower_num represents:
	 *     upper_num = int128_num >> 64;
	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
	 */
	u64 upper_num, lower_num;

	/* the two halves swap position depending on endianness */
#ifdef __BIG_ENDIAN_BITFIELD
	upper_num = *(u64 *)data;
	lower_num = *(u64 *)(data + 8);
#else
	upper_num = *(u64 *)(data + 8);
	lower_num = *(u64 *)data;
#endif
	if (upper_num == 0)
		seq_printf(m, "0x%llx", lower_num);
	else
		seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
}
1452
1453static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
1454 u16 right_shift_bits)
1455{
1456 u64 upper_num, lower_num;
1457
1458#ifdef __BIG_ENDIAN_BITFIELD
1459 upper_num = print_num[0];
1460 lower_num = print_num[1];
1461#else
1462 upper_num = print_num[1];
1463 lower_num = print_num[0];
1464#endif
1465
1466 /* shake out un-needed bits by shift/or operations */
1467 if (left_shift_bits >= 64) {
1468 upper_num = lower_num << (left_shift_bits - 64);
1469 lower_num = 0;
1470 } else {
1471 upper_num = (upper_num << left_shift_bits) |
1472 (lower_num >> (64 - left_shift_bits));
1473 lower_num = lower_num << left_shift_bits;
1474 }
1475
1476 if (right_shift_bits >= 64) {
1477 lower_num = upper_num >> (right_shift_bits - 64);
1478 upper_num = 0;
1479 } else {
1480 lower_num = (lower_num >> right_shift_bits) |
1481 (upper_num << (64 - right_shift_bits));
1482 upper_num = upper_num >> right_shift_bits;
1483 }
1484
1485#ifdef __BIG_ENDIAN_BITFIELD
1486 print_num[0] = upper_num;
1487 print_num[1] = lower_num;
1488#else
1489 print_num[0] = lower_num;
1490 print_num[1] = upper_num;
1491#endif
1492}
1493
/* Extract an @nr_bits-wide bitfield starting @bits_offset bits into
 * @data (bits_offset < 8) and print it as hex.
 */
static void btf_bitfield_seq_show(void *data, u8 bits_offset,
				  u8 nr_bits, struct seq_file *m)
{
	u16 left_shift_bits, right_shift_bits;
	u8 nr_copy_bytes;
	u8 nr_copy_bits;
	u64 print_num[2] = {};

	/* total bits to fetch, including the intra-byte lead-in */
	nr_copy_bits = nr_bits + bits_offset;
	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);

	memcpy(print_num, data, nr_copy_bytes);

#ifdef __BIG_ENDIAN_BITFIELD
	left_shift_bits = bits_offset;
#else
	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
#endif
	right_shift_bits = BITS_PER_U128 - nr_bits;

	/* left shift discards the bits above the field, the following
	 * right shift discards the bits below it
	 */
	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
	btf_int128_print(m, print_num);
}
1517
Yonghong Song9d5f9f72018-12-15 22:13:51 -08001518
/* Slow-path int printing: handles a non-zero bit offset and/or a bit
 * width that is not a whole number of bytes.
 */
static void btf_int_bits_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  void *data, u8 bits_offset,
				  struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 nr_bits = BTF_INT_BITS(int_data);
	u8 total_bits_offset;

	/*
	 * bits_offset is at most 7.
	 * BTF_INT_OFFSET() cannot exceed 128 bits.
	 */
	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
	/* advance to the byte containing the first bit of the value */
	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
	btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
}
1537
/* Pretty-print an int value.  Byte-aligned 8/16/32/64/128-bit ints are
 * printed directly with the matching format specifier; everything else
 * goes through the bitfield path.
 */
static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 encoding = BTF_INT_ENCODING(int_data);
	bool sign = encoding & BTF_INT_SIGNED;
	u8 nr_bits = BTF_INT_BITS(int_data);

	/* any bit offset or non-byte-multiple width disqualifies the
	 * direct-print fast path
	 */
	if (bits_offset || BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(nr_bits)) {
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
		return;
	}

	switch (nr_bits) {
	case 128:
		btf_int128_print(m, data);
		break;
	case 64:
		if (sign)
			seq_printf(m, "%lld", *(s64 *)data);
		else
			seq_printf(m, "%llu", *(u64 *)data);
		break;
	case 32:
		if (sign)
			seq_printf(m, "%d", *(s32 *)data);
		else
			seq_printf(m, "%u", *(u32 *)data);
		break;
	case 16:
		if (sign)
			seq_printf(m, "%d", *(s16 *)data);
		else
			seq_printf(m, "%u", *(u16 *)data);
		break;
	case 8:
		if (sign)
			seq_printf(m, "%d", *(s8 *)data);
		else
			seq_printf(m, "%u", *(u8 *)data);
		break;
	default:
		/* odd byte-multiple widths (e.g. 24 bits) */
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
	}
}
1585
/* kind operations for BTF_KIND_INT */
static const struct btf_kind_operations int_ops = {
	.check_meta = btf_int_check_meta,
	.resolve = btf_df_resolve,	/* ints never need resolving */
	.check_member = btf_int_check_member,
	.check_kflag_member = btf_int_check_kflag_member,
	.log_details = btf_int_log,
	.seq_show = btf_int_seq_show,
};
1594
Martin KaFai Lau179cde82018-04-18 15:55:59 -07001595static int btf_modifier_check_member(struct btf_verifier_env *env,
1596 const struct btf_type *struct_type,
1597 const struct btf_member *member,
1598 const struct btf_type *member_type)
1599{
1600 const struct btf_type *resolved_type;
1601 u32 resolved_type_id = member->type;
1602 struct btf_member resolved_member;
1603 struct btf *btf = env->btf;
1604
1605 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1606 if (!resolved_type) {
1607 btf_verifier_log_member(env, struct_type, member,
1608 "Invalid member");
1609 return -EINVAL;
1610 }
1611
1612 resolved_member = *member;
1613 resolved_member.type = resolved_type_id;
1614
1615 return btf_type_ops(resolved_type)->check_member(env, struct_type,
1616 &resolved_member,
1617 resolved_type);
1618}
1619
Yonghong Song9d5f9f72018-12-15 22:13:51 -08001620static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
1621 const struct btf_type *struct_type,
1622 const struct btf_member *member,
1623 const struct btf_type *member_type)
1624{
1625 const struct btf_type *resolved_type;
1626 u32 resolved_type_id = member->type;
1627 struct btf_member resolved_member;
1628 struct btf *btf = env->btf;
1629
1630 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1631 if (!resolved_type) {
1632 btf_verifier_log_member(env, struct_type, member,
1633 "Invalid member");
1634 return -EINVAL;
1635 }
1636
1637 resolved_member = *member;
1638 resolved_member.type = resolved_type_id;
1639
1640 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
1641 &resolved_member,
1642 resolved_type);
1643}
1644
Martin KaFai Lau179cde82018-04-18 15:55:59 -07001645static int btf_ptr_check_member(struct btf_verifier_env *env,
1646 const struct btf_type *struct_type,
1647 const struct btf_member *member,
1648 const struct btf_type *member_type)
1649{
1650 u32 struct_size, struct_bits_off, bytes_offset;
1651
1652 struct_size = struct_type->size;
1653 struct_bits_off = member->offset;
1654 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1655
1656 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1657 btf_verifier_log_member(env, struct_type, member,
1658 "Member is not byte aligned");
1659 return -EINVAL;
1660 }
1661
1662 if (struct_size - bytes_offset < sizeof(void *)) {
1663 btf_verifier_log_member(env, struct_type, member,
1664 "Member exceeds struct_size");
1665 return -EINVAL;
1666 }
1667
1668 return 0;
1669}
1670
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001671static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1672 const struct btf_type *t,
1673 u32 meta_left)
1674{
1675 if (btf_type_vlen(t)) {
1676 btf_verifier_log_type(env, t, "vlen != 0");
1677 return -EINVAL;
1678 }
1679
Yonghong Song9d5f9f72018-12-15 22:13:51 -08001680 if (btf_type_kflag(t)) {
1681 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1682 return -EINVAL;
1683 }
1684
Martin KaFai Lauaea2f7b82018-05-22 14:57:20 -07001685 if (!BTF_TYPE_ID_VALID(t->type)) {
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001686 btf_verifier_log_type(env, t, "Invalid type_id");
1687 return -EINVAL;
1688 }
1689
Yonghong Songeb04bbb2018-11-27 13:23:28 -08001690 /* typedef type must have a valid name, and other ref types,
1691 * volatile, const, restrict, should have a null name.
1692 */
1693 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
1694 if (!t->name_off ||
1695 !btf_name_valid_identifier(env->btf, t->name_off)) {
1696 btf_verifier_log_type(env, t, "Invalid name");
1697 return -EINVAL;
1698 }
1699 } else {
1700 if (t->name_off) {
1701 btf_verifier_log_type(env, t, "Invalid name");
1702 return -EINVAL;
1703 }
1704 }
1705
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001706 btf_verifier_log_type(env, t, NULL);
1707
1708 return 0;
1709}
1710
/* Resolve a modifier (typedef/const/volatile/restrict): follow t->type,
 * deferring (re-pushing) until the referenced type is resolved or is a
 * sink, then cache the resolved id for later lookups.
 */
static int btf_modifier_resolve(struct btf_verifier_env *env,
				const struct resolve_vertex *v)
{
	const struct btf_type *t = v->t;
	const struct btf_type *next_type;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* defer: the referenced type must be resolved (or be a sink)
	 * before this modifier can be marked resolved
	 */
	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* Figure out the resolved next_type_id with size.
	 * They will be stored in the current modifier's
	 * resolved_ids and resolved_sizes such that it can
	 * save us a few type-following when we use it later (e.g. in
	 * pretty print).
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		/* "typedef void new_void", "const void"...etc */
		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}
1752
/* Resolve a BTF_KIND_VAR: follow t->type through modifiers (and one
 * level of ptr behind a modifier), requiring the final target to have a
 * concrete size.
 */
static int btf_var_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* defer until the referenced type is resolved or is a sink */
	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		/* a ptr behind the modifier chain may itself still need
		 * resolving before the size lookup below can succeed
		 */
		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	/* We must resolve to something concrete at this point, no
	 * forward types or similar that would resolve to size of
	 * zero is allowed.
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}
1798
/* Resolve a BTF_KIND_PTR: follow the pointee type (t->type),
 * pushing unresolved referenced types onto the resolve stack.
 * Unlike VAR, a pointer is allowed to point at sizeless types
 * (void, fwd declarations, func_proto), so those are accepted
 * when the pointee has no size.
 *
 * Returns 0 on success (or after pushing more work onto the stack),
 * -EINVAL on an invalid pointee.
 */
static int btf_ptr_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
	 * the modifier may have stopped resolving when it was resolved
	 * to a ptr (last-resolved-ptr).
	 *
	 * We now need to continue from the last-resolved-ptr to
	 * ensure the last-resolved-ptr will not refer back to
	 * the current ptr (t).
	 */
	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	/* Sizeless pointee: only void, fwd and func_proto are valid
	 * targets of a pointer ("void *", fwd-declared struct ptr,
	 * function pointer).
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}
1855
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07001856static void btf_modifier_seq_show(const struct btf *btf,
1857 const struct btf_type *t,
1858 u32 type_id, void *data,
1859 u8 bits_offset, struct seq_file *m)
1860{
1861 t = btf_type_id_resolve(btf, &type_id);
1862
1863 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1864}
1865
Daniel Borkmann1dc92852019-04-09 23:20:09 +02001866static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
1867 u32 type_id, void *data, u8 bits_offset,
1868 struct seq_file *m)
1869{
1870 t = btf_type_id_resolve(btf, &type_id);
1871
1872 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1873}
1874
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07001875static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1876 u32 type_id, void *data, u8 bits_offset,
1877 struct seq_file *m)
1878{
1879 /* It is a hashed value */
1880 seq_printf(m, "%p", *(void **)data);
1881}
1882
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001883static void btf_ref_type_log(struct btf_verifier_env *env,
1884 const struct btf_type *t)
1885{
1886 btf_verifier_log(env, "type_id=%u", t->type);
1887}
1888
/* Kind operations for type modifiers (typedef/const/volatile/...);
 * they share the generic reference-type meta check and log with ptr.
 */
static struct btf_kind_operations modifier_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_modifier_resolve,
	.check_member = btf_modifier_check_member,
	.check_kflag_member = btf_modifier_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_modifier_seq_show,
};
1897
/* Kind operations for BTF_KIND_PTR. */
static struct btf_kind_operations ptr_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_ptr_resolve,
	.check_member = btf_ptr_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_ptr_seq_show,
};
1906
/* Validate the metadata of a BTF_KIND_FWD (forward declaration).
 * A fwd has no members (vlen == 0), refers to no other type
 * (type == 0) and must carry a valid identifier name.
 *
 * Returns the number of extra metadata bytes consumed (0 for fwd)
 * or -EINVAL on a malformed type.
 */
static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (t->type) {
		btf_verifier_log_type(env, t, "type != 0");
		return -EINVAL;
	}

	/* fwd type must have a valid name */
	if (!t->name_off ||
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}
1932
/* Log whether this fwd declares a struct or a union; the
 * distinction is carried in the kind_flag bit.
 */
static void btf_fwd_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	const char *fwd_kind;

	if (btf_type_kflag(t))
		fwd_kind = "union";
	else
		fwd_kind = "struct";

	btf_verifier_log(env, "%s", fwd_kind);
}
1938
/* Kind operations for BTF_KIND_FWD. A fwd is sizeless, so resolve,
 * member checks and seq_show all use the "don't care" (df) handlers.
 */
static struct btf_kind_operations fwd_ops = {
	.check_meta = btf_fwd_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_fwd_type_log,
	.seq_show = btf_df_seq_show,
};
1947
Martin KaFai Lau179cde82018-04-18 15:55:59 -07001948static int btf_array_check_member(struct btf_verifier_env *env,
1949 const struct btf_type *struct_type,
1950 const struct btf_member *member,
1951 const struct btf_type *member_type)
1952{
1953 u32 struct_bits_off = member->offset;
1954 u32 struct_size, bytes_offset;
1955 u32 array_type_id, array_size;
1956 struct btf *btf = env->btf;
1957
1958 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1959 btf_verifier_log_member(env, struct_type, member,
1960 "Member is not byte aligned");
1961 return -EINVAL;
1962 }
1963
1964 array_type_id = member->type;
1965 btf_type_id_size(btf, &array_type_id, &array_size);
1966 struct_size = struct_type->size;
1967 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1968 if (struct_size - bytes_offset < array_size) {
1969 btf_verifier_log_member(env, struct_type, member,
1970 "Member exceeds struct_size");
1971 return -EINVAL;
1972 }
1973
1974 return 0;
1975}
1976
/* Validate the metadata of a BTF_KIND_ARRAY. A btf_array record
 * follows the btf_type header; the type itself is anonymous, has
 * vlen == 0, no kind_flag, size == 0, and valid (non-void) element
 * and index type ids.
 *
 * Returns the number of extra metadata bytes consumed
 * (sizeof(struct btf_array)) or -EINVAL on a malformed type.
 */
static s32 btf_array_check_meta(struct btf_verifier_env *env,
				const struct btf_type *t,
				u32 meta_left)
{
	const struct btf_array *array = btf_type_array(t);
	u32 meta_needed = sizeof(*array);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	/* array type should not have a name */
	if (t->name_off) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	/* the array size is carried by nelems * elem size, not here */
	if (t->size) {
		btf_verifier_log_type(env, t, "size != 0");
		return -EINVAL;
	}

	/* Array elem type and index type cannot be in type void,
	 * so !array->type and !array->index_type are not allowed.
	 */
	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
		btf_verifier_log_type(env, t, "Invalid elem");
		return -EINVAL;
	}

	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
		btf_verifier_log_type(env, t, "Invalid index");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
2029
/* Resolve a BTF_KIND_ARRAY: both the index type and the element
 * type must resolve to sized, concrete types; the index must be a
 * regular int, and the total array size must not overflow u32.
 *
 * Returns 0 on success (or after pushing unresolved referenced
 * types onto the resolve stack), -EINVAL on a malformed array.
 */
static int btf_array_resolve(struct btf_verifier_env *env,
			     const struct resolve_vertex *v)
{
	const struct btf_array *array = btf_type_array(v->t);
	const struct btf_type *elem_type, *index_type;
	u32 elem_type_id, index_type_id;
	struct btf *btf = env->btf;
	u32 elem_size;

	/* Check array->index_type */
	index_type_id = array->index_type;
	index_type = btf_type_by_id(btf, index_type_id);
	if (btf_type_nosize_or_null(index_type) ||
	    btf_type_is_resolve_source_only(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, index_type) &&
	    !env_type_is_resolved(env, index_type_id))
		return env_stack_push(env, index_type, index_type_id);

	/* the index must resolve to a regular (byte-aligned,
	 * power-of-2 sized) int
	 */
	index_type = btf_type_id_size(btf, &index_type_id, NULL);
	if (!index_type || !btf_type_is_int(index_type) ||
	    !btf_type_int_is_regular(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	/* Check array->type */
	elem_type_id = array->type;
	elem_type = btf_type_by_id(btf, elem_type_id);
	if (btf_type_nosize_or_null(elem_type) ||
	    btf_type_is_resolve_source_only(elem_type)) {
		btf_verifier_log_type(env, v->t,
				      "Invalid elem");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, elem_type) &&
	    !env_type_is_resolved(env, elem_type_id))
		return env_stack_push(env, elem_type, elem_type_id);

	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
	if (!elem_type) {
		btf_verifier_log_type(env, v->t, "Invalid elem");
		return -EINVAL;
	}

	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
		btf_verifier_log_type(env, v->t, "Invalid array of int");
		return -EINVAL;
	}

	/* elem_size * nelems must fit in u32 */
	if (array->nelems && elem_size > U32_MAX / array->nelems) {
		btf_verifier_log_type(env, v->t,
				      "Array size overflows U32_MAX");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);

	return 0;
}
2094
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07002095static void btf_array_log(struct btf_verifier_env *env,
2096 const struct btf_type *t)
2097{
2098 const struct btf_array *array = btf_type_array(t);
2099
2100 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
2101 array->type, array->index_type, array->nelems);
2102}
2103
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07002104static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
2105 u32 type_id, void *data, u8 bits_offset,
2106 struct seq_file *m)
2107{
2108 const struct btf_array *array = btf_type_array(t);
2109 const struct btf_kind_operations *elem_ops;
2110 const struct btf_type *elem_type;
2111 u32 i, elem_size, elem_type_id;
2112
2113 elem_type_id = array->type;
2114 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2115 elem_ops = btf_type_ops(elem_type);
2116 seq_puts(m, "[");
2117 for (i = 0; i < array->nelems; i++) {
2118 if (i)
2119 seq_puts(m, ",");
2120
2121 elem_ops->seq_show(btf, elem_type, elem_type_id, data,
2122 bits_offset, m);
2123 data += elem_size;
2124 }
2125 seq_puts(m, "]");
2126}
2127
/* Kind operations for BTF_KIND_ARRAY. */
static struct btf_kind_operations array_ops = {
	.check_meta = btf_array_check_meta,
	.resolve = btf_array_resolve,
	.check_member = btf_array_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_array_log,
	.seq_show = btf_array_seq_show,
};
2136
Martin KaFai Lau179cde82018-04-18 15:55:59 -07002137static int btf_struct_check_member(struct btf_verifier_env *env,
2138 const struct btf_type *struct_type,
2139 const struct btf_member *member,
2140 const struct btf_type *member_type)
2141{
2142 u32 struct_bits_off = member->offset;
2143 u32 struct_size, bytes_offset;
2144
2145 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2146 btf_verifier_log_member(env, struct_type, member,
2147 "Member is not byte aligned");
2148 return -EINVAL;
2149 }
2150
2151 struct_size = struct_type->size;
2152 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2153 if (struct_size - bytes_offset < member_type->size) {
2154 btf_verifier_log_member(env, struct_type, member,
2155 "Member exceeds struct_size");
2156 return -EINVAL;
2157 }
2158
2159 return 0;
2160}
2161
/* Validate the metadata of a BTF_KIND_STRUCT/UNION: the btf_member
 * array following the header, optional (but valid) type/member
 * names, non-void member types, monotonically non-decreasing member
 * bit offsets (zero offsets for union members) that all fit inside
 * the declared struct size.
 *
 * Returns the number of extra metadata bytes consumed
 * (vlen * sizeof(struct btf_member)) or -EINVAL on a malformed type.
 */
static s32 btf_struct_check_meta(struct btf_verifier_env *env,
				 const struct btf_type *t,
				 u32 meta_left)
{
	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
	const struct btf_member *member;
	u32 meta_needed, last_offset;
	struct btf *btf = env->btf;
	u32 struct_size = t->size;
	u32 offset;
	u16 i;

	meta_needed = btf_type_vlen(t) * sizeof(*member);
	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	/* struct type either no name or a valid one */
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	last_offset = 0;
	for_each_member(i, t, member) {
		if (!btf_name_offset_valid(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member,
						"Invalid member name_offset:%u",
						member->name_off);
			return -EINVAL;
		}

		/* struct member either no name or a valid one */
		if (member->name_off &&
		    !btf_name_valid_identifier(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member, "Invalid name");
			return -EINVAL;
		}
		/* A member cannot be in type void */
		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
			btf_verifier_log_member(env, t, member,
						"Invalid type_id");
			return -EINVAL;
		}

		/* all union members must sit at bit offset 0 */
		offset = btf_member_bit_offset(t, member);
		if (is_union && offset) {
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
			return -EINVAL;
		}

		/*
		 * ">" instead of ">=" because the last member could be
		 * "char a[0];"
		 */
		if (last_offset > offset) {
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
			return -EINVAL;
		}

		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
			btf_verifier_log_member(env, t, member,
						"Member bits_offset exceeds its struct size");
			return -EINVAL;
		}

		btf_verifier_log_member(env, t, member, NULL);
		last_offset = offset;
	}

	return meta_needed;
}
2242
/* Resolve a BTF_KIND_STRUCT/UNION: walk the members starting from
 * v->next_member (the walk is resumable: we may return to this
 * vertex after pushing an unresolved member type onto the stack),
 * ensuring each member resolves to a sized type and passes the
 * appropriate check_member/check_kflag_member validation.
 *
 * Returns 0 on success (or after pushing more work onto the stack),
 * a negative errno on a malformed member.
 */
static int btf_struct_resolve(struct btf_verifier_env *env,
			      const struct resolve_vertex *v)
{
	const struct btf_member *member;
	int err;
	u16 i;

	/* Before continue resolving the next_member,
	 * ensure the last member is indeed resolved to a
	 * type with size info.
	 */
	if (v->next_member) {
		const struct btf_type *last_member_type;
		const struct btf_member *last_member;
		u16 last_member_type_id;

		last_member = btf_type_member(v->t) + v->next_member - 1;
		last_member_type_id = last_member->type;
		if (WARN_ON_ONCE(!env_type_is_resolved(env,
						       last_member_type_id)))
			return -EINVAL;

		last_member_type = btf_type_by_id(env->btf,
						  last_member_type_id);
		/* kind_flag selects the bitfield-aware member check */
		if (btf_type_kflag(v->t))
			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
								last_member,
								last_member_type);
		else
			err = btf_type_ops(last_member_type)->check_member(env, v->t,
								last_member,
								last_member_type);
		if (err)
			return err;
	}

	for_each_member_from(i, v->next_member, v->t, member) {
		u32 member_type_id = member->type;
		const struct btf_type *member_type = btf_type_by_id(env->btf,
								member_type_id);

		if (btf_type_nosize_or_null(member_type) ||
		    btf_type_is_resolve_source_only(member_type)) {
			btf_verifier_log_member(env, v->t, member,
						"Invalid member");
			return -EINVAL;
		}

		/* remember where to resume before descending into the
		 * unresolved member type
		 */
		if (!env_type_is_resolve_sink(env, member_type) &&
		    !env_type_is_resolved(env, member_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, member_type, member_type_id);
		}

		if (btf_type_kflag(v->t))
			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
									    member,
									    member_type);
		else
			err = btf_type_ops(member_type)->check_member(env, v->t,
								      member,
								      member_type);
		if (err)
			return err;
	}

	env_stack_pop_resolved(env, 0, 0);

	return 0;
}
2313
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07002314static void btf_struct_log(struct btf_verifier_env *env,
2315 const struct btf_type *t)
2316{
2317 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2318}
2319
/* find 'struct bpf_spin_lock' in map value.
 * return >= 0 offset if found
 * and < 0 in case of error
 *
 * At most one bpf_spin_lock member is allowed (-E2BIG otherwise),
 * and it must be byte-aligned and aligned to the spin lock's own
 * alignment (-EINVAL otherwise).
 */
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
{
	const struct btf_member *member;
	/* off is u32 but carries -ENOENT as a sentinel; it is
	 * converted back to a negative errno by the int return.
	 */
	u32 i, off = -ENOENT;

	if (!__btf_type_is_struct(t))
		return -EINVAL;

	for_each_member(i, t, member) {
		const struct btf_type *member_type = btf_type_by_id(btf,
								    member->type);
		/* match members by struct-ness, size and name */
		if (!__btf_type_is_struct(member_type))
			continue;
		if (member_type->size != sizeof(struct bpf_spin_lock))
			continue;
		if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
			   "bpf_spin_lock"))
			continue;
		if (off != -ENOENT)
			/* only one 'struct bpf_spin_lock' is allowed */
			return -E2BIG;
		off = btf_member_bit_offset(t, member);
		if (off % 8)
			/* valid C code cannot generate such BTF */
			return -EINVAL;
		off /= 8;
		if (off % __alignof__(struct bpf_spin_lock))
			/* valid struct bpf_spin_lock will be 4 byte aligned */
			return -EINVAL;
	}
	return off;
}
2356
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07002357static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
2358 u32 type_id, void *data, u8 bits_offset,
2359 struct seq_file *m)
2360{
2361 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
2362 const struct btf_member *member;
2363 u32 i;
2364
2365 seq_puts(m, "{");
2366 for_each_member(i, t, member) {
2367 const struct btf_type *member_type = btf_type_by_id(btf,
2368 member->type);
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07002369 const struct btf_kind_operations *ops;
Yonghong Song9d5f9f72018-12-15 22:13:51 -08002370 u32 member_offset, bitfield_size;
2371 u32 bytes_offset;
2372 u8 bits8_offset;
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07002373
2374 if (i)
2375 seq_puts(m, seq);
2376
Yonghong Song9d5f9f72018-12-15 22:13:51 -08002377 member_offset = btf_member_bit_offset(t, member);
2378 bitfield_size = btf_member_bitfield_size(t, member);
Yonghong Song17e3ac82019-01-10 11:14:00 -08002379 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2380 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
Yonghong Song9d5f9f72018-12-15 22:13:51 -08002381 if (bitfield_size) {
Yonghong Song17e3ac82019-01-10 11:14:00 -08002382 btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
Yonghong Song9d5f9f72018-12-15 22:13:51 -08002383 bitfield_size, m);
2384 } else {
Yonghong Song9d5f9f72018-12-15 22:13:51 -08002385 ops = btf_type_ops(member_type);
2386 ops->seq_show(btf, member_type, member->type,
2387 data + bytes_offset, bits8_offset, m);
2388 }
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07002389 }
2390 seq_puts(m, "}");
2391}
2392
/* Kind operations shared by struct and union (the union-specific
 * rules are handled inside the btf_struct_* callbacks via
 * BTF_INFO_KIND checks).
 */
static struct btf_kind_operations struct_ops = {
	.check_meta = btf_struct_check_meta,
	.resolve = btf_struct_resolve,
	.check_member = btf_struct_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_struct_log,
	.seq_show = btf_struct_seq_show,
};
2401
Martin KaFai Lau179cde82018-04-18 15:55:59 -07002402static int btf_enum_check_member(struct btf_verifier_env *env,
2403 const struct btf_type *struct_type,
2404 const struct btf_member *member,
2405 const struct btf_type *member_type)
2406{
2407 u32 struct_bits_off = member->offset;
2408 u32 struct_size, bytes_offset;
2409
2410 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2411 btf_verifier_log_member(env, struct_type, member,
2412 "Member is not byte aligned");
2413 return -EINVAL;
2414 }
2415
2416 struct_size = struct_type->size;
2417 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2418 if (struct_size - bytes_offset < sizeof(int)) {
2419 btf_verifier_log_member(env, struct_type, member,
2420 "Member exceeds struct_size");
2421 return -EINVAL;
2422 }
2423
2424 return 0;
2425}
2426
/* Check an enum-typed member of a kind_flag struct, where
 * member->offset packs both a bitfield size and a bit offset.
 * A zero bitfield size means a plain (byte-aligned, int-sized)
 * member; a non-zero size must not exceed the bit width of int.
 * Either way the member's bits must fit inside the struct.
 */
static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
				       const struct btf_type *struct_type,
				       const struct btf_member *member,
				       const struct btf_type *member_type)
{
	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;

	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
	if (!nr_bits) {
		/* not a bitfield: must start on a byte boundary */
		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
			btf_verifier_log_member(env, struct_type, member,
						"Member is not byte aligned");
			return -EINVAL;
		}

		nr_bits = int_bitsize;
	} else if (nr_bits > int_bitsize) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	struct_size = struct_type->size;
	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
	if (struct_size < bytes_end) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
2461
/* Validate the metadata of a BTF_KIND_ENUM: the btf_enum array of
 * vlen enumerators following the header, no kind_flag, a power-of-2
 * size of at most 8 bytes, an optional (but valid) type name, and a
 * valid, non-empty name for every enumerator.
 *
 * Returns the number of extra metadata bytes consumed
 * (vlen * sizeof(struct btf_enum)) or -EINVAL on a malformed type.
 */
static s32 btf_enum_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{
	const struct btf_enum *enums = btf_type_enum(t);
	struct btf *btf = env->btf;
	u16 i, nr_enums;
	u32 meta_needed;

	nr_enums = btf_type_vlen(t);
	meta_needed = nr_enums * sizeof(*enums);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (t->size > 8 || !is_power_of_2(t->size)) {
		btf_verifier_log_type(env, t, "Unexpected size");
		return -EINVAL;
	}

	/* enum type either no name or a valid one */
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	for (i = 0; i < nr_enums; i++) {
		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
			btf_verifier_log(env, "\tInvalid name_offset:%u",
					 enums[i].name_off);
			return -EINVAL;
		}

		/* enum member must have a valid name */
		if (!enums[i].name_off ||
		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}

		/* skip per-enumerator logging for kernel-internal logs */
		if (env->log.level == BPF_LOG_KERNEL)
			continue;
		btf_verifier_log(env, "\t%s val=%d\n",
				 __btf_name_by_offset(btf, enums[i].name_off),
				 enums[i].val);
	}

	return meta_needed;
}
2523
2524static void btf_enum_log(struct btf_verifier_env *env,
2525 const struct btf_type *t)
2526{
2527 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2528}
2529
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07002530static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
2531 u32 type_id, void *data, u8 bits_offset,
2532 struct seq_file *m)
2533{
2534 const struct btf_enum *enums = btf_type_enum(t);
2535 u32 i, nr_enums = btf_type_vlen(t);
2536 int v = *(int *)data;
2537
2538 for (i = 0; i < nr_enums; i++) {
2539 if (v == enums[i].val) {
2540 seq_printf(m, "%s",
Martin KaFai Lau23127b32018-12-13 10:41:46 -08002541 __btf_name_by_offset(btf,
2542 enums[i].name_off));
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07002543 return;
2544 }
2545 }
2546
2547 seq_printf(m, "%d", v);
2548}
2549
/* Kind operations for BTF_KIND_ENUM. */
static struct btf_kind_operations enum_ops = {
	.check_meta = btf_enum_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_enum_check_member,
	.check_kflag_member = btf_enum_check_kflag_member,
	.log_details = btf_enum_log,
	.seq_show = btf_enum_seq_show,
};
2558
/* Validate the metadata of a BTF_KIND_FUNC_PROTO: the btf_param
 * array of vlen entries following the header. A func_proto is
 * anonymous (no name) and carries no kind_flag.
 *
 * Returns the number of extra metadata bytes consumed
 * (vlen * sizeof(struct btf_param)) or -EINVAL on a malformed type.
 */
static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
				     const struct btf_type *t,
				     u32 meta_left)
{
	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (t->name_off) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
2586
/* Log a FUNC_PROTO as "return=<type_id> args=(<type_id> <name>, ...)".
 * An empty arg list prints as "(void)".  A trailing btf_param with
 * type == 0 denotes varargs and is printed as "vararg".
 */
static void btf_func_proto_log(struct btf_verifier_env *env,
			       const struct btf_type *t)
{
	const struct btf_param *args = (const struct btf_param *)(t + 1);
	u16 nr_args = btf_type_vlen(t), i;

	btf_verifier_log(env, "return=%u args=(", t->type);
	if (!nr_args) {
		btf_verifier_log(env, "void");
		goto done;
	}

	if (nr_args == 1 && !args[0].type) {
		/* Only one vararg */
		btf_verifier_log(env, "vararg");
		goto done;
	}

	/* first arg, then middle args with ", " separators */
	btf_verifier_log(env, "%u %s", args[0].type,
			 __btf_name_by_offset(env->btf,
					      args[0].name_off));
	for (i = 1; i < nr_args - 1; i++)
		btf_verifier_log(env, ", %u %s", args[i].type,
				 __btf_name_by_offset(env->btf,
						      args[i].name_off));

	if (nr_args > 1) {
		const struct btf_param *last_arg = &args[nr_args - 1];

		/* last arg is printed specially: type 0 means vararg */
		if (last_arg->type)
			btf_verifier_log(env, ", %u %s", last_arg->type,
					 __btf_name_by_offset(env->btf,
							      last_arg->name_off));
		else
			btf_verifier_log(env, ", vararg");
	}

done:
	btf_verifier_log(env, ")");
}
2627
/* Kind-specific callbacks for BTF_KIND_FUNC_PROTO */
static struct btf_kind_operations func_proto_ops = {
	.check_meta = btf_func_proto_check_meta,
	.resolve = btf_df_resolve,
	/*
	 * BTF_KIND_FUNC_PROTO cannot be directly referred by
	 * a struct's member.
	 *
	 * It should be a function pointer instead.
	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
	 *
	 * Hence, there is no btf_func_check_member().
	 */
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_func_proto_log,
	.seq_show = btf_df_seq_show,
};
2645
2646static s32 btf_func_check_meta(struct btf_verifier_env *env,
2647 const struct btf_type *t,
2648 u32 meta_left)
2649{
2650 if (!t->name_off ||
2651 !btf_name_valid_identifier(env->btf, t->name_off)) {
2652 btf_verifier_log_type(env, t, "Invalid name");
2653 return -EINVAL;
2654 }
2655
2656 if (btf_type_vlen(t)) {
2657 btf_verifier_log_type(env, t, "vlen != 0");
2658 return -EINVAL;
2659 }
2660
Yonghong Song9d5f9f72018-12-15 22:13:51 -08002661 if (btf_type_kflag(t)) {
2662 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2663 return -EINVAL;
2664 }
2665
Martin KaFai Lau2667a262018-11-19 15:29:08 -08002666 btf_verifier_log_type(env, t, NULL);
2667
2668 return 0;
2669}
2670
/* Kind-specific callbacks for BTF_KIND_FUNC.  A FUNC references its
 * FUNC_PROTO via t->type, so the generic ref-type logger is reused;
 * the remaining callbacks are the btf_df_* defaults.
 */
static struct btf_kind_operations func_ops = {
	.check_meta = btf_func_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_df_seq_show,
};
2679
/* Validate BTF_KIND_VAR metadata: a trailing struct btf_var must fit
 * in the remaining metadata; vlen and kind_flag must be zero; the var
 * must have a valid name; its type must not be void; and its linkage
 * must be static or global-allocated.
 * Returns the number of extra metadata bytes consumed, or -EINVAL.
 */
static s32 btf_var_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	const struct btf_var *var;
	u32 meta_needed = sizeof(*var);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (!t->name_off ||
	    !__btf_name_valid(env->btf, t->name_off, true)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	/* A var cannot be in type void */
	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	var = btf_type_var(t);
	if (var->linkage != BTF_VAR_STATIC &&
	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
		btf_verifier_log_type(env, t, "Linkage not supported");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
2727
2728static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
2729{
2730 const struct btf_var *var = btf_type_var(t);
2731
2732 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
2733}
2734
/* Kind-specific callbacks for BTF_KIND_VAR */
static const struct btf_kind_operations var_ops = {
	.check_meta = btf_var_check_meta,
	.resolve = btf_var_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_var_log,
	.seq_show = btf_var_seq_show,
};
2743
/* Validate BTF_KIND_DATASEC metadata: vlen btf_var_secinfo entries
 * must fit in the remaining metadata; vlen and size must be non-zero
 * and kind_flag zero; the name must be a valid section name.  Each
 * entry must reference a non-void type, and the entries must be
 * sorted by offset, non-overlapping and fully contained in t->size.
 * Returns the number of extra metadata bytes consumed, or -EINVAL.
 */
static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
				  const struct btf_type *t,
				  u32 meta_left)
{
	const struct btf_var_secinfo *vsi;
	u64 last_vsi_end_off = 0, sum = 0;
	u32 i, meta_needed;

	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (!btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen == 0");
		return -EINVAL;
	}

	if (!t->size) {
		btf_verifier_log_type(env, t, "size == 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (!t->name_off ||
	    !btf_name_valid_section(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	for_each_vsi(i, t, vsi) {
		/* A var cannot be in type void */
		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid type_id");
			return -EINVAL;
		}

		/* entries must be sorted and must not overlap the
		 * previous entry
		 */
		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid offset");
			return -EINVAL;
		}

		if (!vsi->size || vsi->size > t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid size");
			return -EINVAL;
		}

		/* u64 arithmetic: offset + size cannot wrap here */
		last_vsi_end_off = vsi->offset + vsi->size;
		if (last_vsi_end_off > t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid offset+size");
			return -EINVAL;
		}

		btf_verifier_log_vsi(env, t, vsi, NULL);
		sum += vsi->size;
	}

	if (t->size < sum) {
		btf_verifier_log_type(env, t, "Invalid btf_info size");
		return -EINVAL;
	}

	return meta_needed;
}
2821
/* Resolve a DATASEC: every member must be a VAR.  Unresolved VARs are
 * pushed onto the resolve stack; v->next_member lets this function
 * resume from the right member when the vertex is re-visited.  Each
 * VAR's underlying type must have a size that fits in the member's
 * declared vsi->size.
 */
static int btf_datasec_resolve(struct btf_verifier_env *env,
			       const struct resolve_vertex *v)
{
	const struct btf_var_secinfo *vsi;
	struct btf *btf = env->btf;
	u16 i;

	for_each_vsi_from(i, v->next_member, v->t, vsi) {
		u32 var_type_id = vsi->type, type_id, type_size = 0;
		const struct btf_type *var_type = btf_type_by_id(env->btf,
								 var_type_id);
		if (!var_type || !btf_type_is_var(var_type)) {
			btf_verifier_log_vsi(env, v->t, vsi,
					     "Not a VAR kind member");
			return -EINVAL;
		}

		if (!env_type_is_resolve_sink(env, var_type) &&
		    !env_type_is_resolved(env, var_type_id)) {
			/* remember where to resume, then recurse via
			 * the resolve stack
			 */
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, var_type, var_type_id);
		}

		type_id = var_type->type;
		if (!btf_type_id_size(btf, &type_id, &type_size)) {
			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
			return -EINVAL;
		}

		if (vsi->size < type_size) {
			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, 0, 0);
	return 0;
}
2860
/* Log the DATASEC-specific details: section size and member count. */
static void btf_datasec_log(struct btf_verifier_env *env,
			    const struct btf_type *t)
{
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}
2866
2867static void btf_datasec_seq_show(const struct btf *btf,
2868 const struct btf_type *t, u32 type_id,
2869 void *data, u8 bits_offset,
2870 struct seq_file *m)
2871{
2872 const struct btf_var_secinfo *vsi;
2873 const struct btf_type *var;
2874 u32 i;
2875
2876 seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
2877 for_each_vsi(i, t, vsi) {
2878 var = btf_type_by_id(btf, vsi->type);
2879 if (i)
2880 seq_puts(m, ",");
2881 btf_type_ops(var)->seq_show(btf, var, vsi->type,
2882 data + vsi->offset, bits_offset, m);
2883 }
2884 seq_puts(m, "}");
2885}
2886
/* Kind-specific callbacks for BTF_KIND_DATASEC */
static const struct btf_kind_operations datasec_ops = {
	.check_meta = btf_datasec_check_meta,
	.resolve = btf_datasec_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_datasec_log,
	.seq_show = btf_datasec_seq_show,
};
2895
/* Second-pass check of a FUNC_PROTO: the return type (unless void) and
 * every argument type must exist, resolve cleanly and have a size.
 * Named arguments must carry valid identifiers.  A trailing arg with
 * type 0 is the vararg marker and must be nameless.
 * Returns 0 on success or a negative error.
 */
static int btf_func_proto_check(struct btf_verifier_env *env,
				const struct btf_type *t)
{
	const struct btf_type *ret_type;
	const struct btf_param *args;
	const struct btf *btf;
	u16 nr_args, i;
	int err;

	btf = env->btf;
	args = (const struct btf_param *)(t + 1);
	nr_args = btf_type_vlen(t);

	/* Check func return type which could be "void" (t->type == 0) */
	if (t->type) {
		u32 ret_type_id = t->type;

		ret_type = btf_type_by_id(btf, ret_type_id);
		if (!ret_type) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}

		if (btf_type_needs_resolve(ret_type) &&
		    !env_type_is_resolved(env, ret_type_id)) {
			err = btf_resolve(env, ret_type, ret_type_id);
			if (err)
				return err;
		}

		/* Ensure the return type is a type that has a size */
		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}
	}

	if (!nr_args)
		return 0;

	/* Last func arg type_id could be 0 if it is a vararg */
	if (!args[nr_args - 1].type) {
		/* the vararg marker must not have a name */
		if (args[nr_args - 1].name_off) {
			btf_verifier_log_type(env, t, "Invalid arg#%u",
					      nr_args);
			return -EINVAL;
		}
		/* skip the marker in the per-arg checks below */
		nr_args--;
	}

	err = 0;
	for (i = 0; i < nr_args; i++) {
		const struct btf_type *arg_type;
		u32 arg_type_id;

		arg_type_id = args[i].type;
		arg_type = btf_type_by_id(btf, arg_type_id);
		if (!arg_type) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (args[i].name_off &&
		    (!btf_name_offset_valid(btf, args[i].name_off) ||
		     !btf_name_valid_identifier(btf, args[i].name_off))) {
			btf_verifier_log_type(env, t,
					      "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (btf_type_needs_resolve(arg_type) &&
		    !env_type_is_resolved(env, arg_type_id)) {
			err = btf_resolve(env, arg_type, arg_type_id);
			if (err)
				break;
		}

		/* every concrete argument must have a size */
		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}
	}

	return err;
}
2984
2985static int btf_func_check(struct btf_verifier_env *env,
2986 const struct btf_type *t)
2987{
2988 const struct btf_type *proto_type;
2989 const struct btf_param *args;
2990 const struct btf *btf;
2991 u16 nr_args, i;
2992
2993 btf = env->btf;
2994 proto_type = btf_type_by_id(btf, t->type);
2995
2996 if (!proto_type || !btf_type_is_func_proto(proto_type)) {
2997 btf_verifier_log_type(env, t, "Invalid type_id");
2998 return -EINVAL;
2999 }
3000
3001 args = (const struct btf_param *)(proto_type + 1);
3002 nr_args = btf_type_vlen(proto_type);
3003 for (i = 0; i < nr_args; i++) {
3004 if (!args[i].name_off && args[i].type) {
3005 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
3006 return -EINVAL;
3007 }
3008 }
3009
3010 return 0;
3011}
3012
/* Dispatch table of per-kind callbacks, indexed by BTF_INFO_KIND() */
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
	[BTF_KIND_INT] = &int_ops,
	[BTF_KIND_PTR] = &ptr_ops,
	[BTF_KIND_ARRAY] = &array_ops,
	[BTF_KIND_STRUCT] = &struct_ops,
	[BTF_KIND_UNION] = &struct_ops,
	[BTF_KIND_ENUM] = &enum_ops,
	[BTF_KIND_FWD] = &fwd_ops,
	[BTF_KIND_TYPEDEF] = &modifier_ops,
	[BTF_KIND_VOLATILE] = &modifier_ops,
	[BTF_KIND_CONST] = &modifier_ops,
	[BTF_KIND_RESTRICT] = &modifier_ops,
	[BTF_KIND_FUNC] = &func_ops,
	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
	[BTF_KIND_VAR] = &var_ops,
	[BTF_KIND_DATASEC] = &datasec_ops,
};
3030
/* Validate the common header of one btf_type (info bits, kind, name
 * offset) and then dispatch to the kind-specific check_meta().
 * Returns the total number of bytes this type occupies in the type
 * section (sizeof(*t) plus kind-specific trailing data), or a
 * negative error.
 */
static s32 btf_check_meta(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left)
{
	u32 saved_meta_left = meta_left;
	s32 var_meta_size;

	if (meta_left < sizeof(*t)) {
		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
				 env->log_type_id, meta_left, sizeof(*t));
		return -EINVAL;
	}
	meta_left -= sizeof(*t);

	/* no unknown bits may be set in btf_info */
	if (t->info & ~BTF_INFO_MASK) {
		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
				 env->log_type_id, t->info);
		return -EINVAL;
	}

	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
		btf_verifier_log(env, "[%u] Invalid kind:%u",
				 env->log_type_id, BTF_INFO_KIND(t->info));
		return -EINVAL;
	}

	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
				 env->log_type_id, t->name_off);
		return -EINVAL;
	}

	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
	if (var_meta_size < 0)
		return var_meta_size;

	meta_left -= var_meta_size;

	return saved_meta_left - meta_left;
}
3072
/* First verification pass: walk the whole type section, validate each
 * btf_type's metadata and register it with btf_add_type().  Type ids
 * are assigned sequentially starting at 1 (id 0 is "void").
 */
static int btf_check_all_metas(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	struct btf_header *hdr;
	void *cur, *end;

	hdr = &btf->hdr;
	cur = btf->nohdr_data + hdr->type_off;
	end = cur + hdr->type_len;

	env->log_type_id = 1;
	while (cur < end) {
		struct btf_type *t = cur;
		s32 meta_size;

		/* meta_size is how far to advance to the next type */
		meta_size = btf_check_meta(env, t, end - cur);
		if (meta_size < 0)
			return meta_size;

		btf_add_type(env, t);
		cur += meta_size;
		env->log_type_id++;
	}

	return 0;
}
3099
/* Post-resolve sanity check: verify the bookkeeping recorded in
 * btf->resolved_ids[]/resolved_sizes[] is consistent for @type_id
 * given its kind.  Returns false for any kind that should never have
 * been resolved.
 */
static bool btf_resolve_valid(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 type_id)
{
	struct btf *btf = env->btf;

	if (!env_type_is_resolved(env, type_id))
		return false;

	/* struct/union/datasec must have left no resolved id/size */
	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
		return !btf->resolved_ids[type_id] &&
		       !btf->resolved_sizes[type_id];

	/* modifiers/ptr/var must resolve to a concrete type */
	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
	    btf_type_is_var(t)) {
		t = btf_type_id_resolve(btf, &type_id);
		return t &&
		       !btf_type_is_modifier(t) &&
		       !btf_type_is_var(t) &&
		       !btf_type_is_datasec(t);
	}

	/* array's recorded size must equal nelems * elem_size */
	if (btf_type_is_array(t)) {
		const struct btf_array *array = btf_type_array(t);
		const struct btf_type *elem_type;
		u32 elem_type_id = array->type;
		u32 elem_size;

		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
			(array->nelems * elem_size ==
			 btf->resolved_sizes[type_id]);
	}

	return false;
}
3136
/* Iteratively resolve @t (and every type it depends on) using an
 * explicit stack instead of recursion.  -E2BIG maps to exceeding
 * MAX_RESOLVE_DEPTH and -EEXIST to a type reference loop; the result
 * is additionally validated with btf_resolve_valid().
 */
static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id)
{
	u32 save_log_type_id = env->log_type_id;
	const struct resolve_vertex *v;
	int err = 0;

	env->resolve_mode = RESOLVE_TBD;
	env_stack_push(env, t, type_id);
	while (!err && (v = env_stack_peak(env))) {
		env->log_type_id = v->type_id;
		err = btf_type_ops(v->t)->resolve(env, v);
	}

	env->log_type_id = type_id;
	if (err == -E2BIG) {
		btf_verifier_log_type(env, t,
				      "Exceeded max resolving depth:%u",
				      MAX_RESOLVE_DEPTH);
	} else if (err == -EEXIST) {
		btf_verifier_log_type(env, t, "Loop detected");
	}

	/* Final sanity check */
	if (!err && !btf_resolve_valid(env, t, type_id)) {
		btf_verifier_log_type(env, t, "Invalid resolve state");
		err = -EINVAL;
	}

	env->log_type_id = save_log_type_id;
	return err;
}
3169
/* Second verification pass: resolve every type that needs it and run
 * the FUNC_PROTO/FUNC specific checks.  Stops at the first error.
 */
static int btf_check_all_types(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 type_id;
	int err;

	err = env_resolve_init(env);
	if (err)
		return err;

	env->phase++;
	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
		const struct btf_type *t = btf_type_by_id(btf, type_id);

		env->log_type_id = type_id;
		if (btf_type_needs_resolve(t) &&
		    !env_type_is_resolved(env, type_id)) {
			err = btf_resolve(env, t, type_id);
			if (err)
				return err;
		}

		if (btf_type_is_func_proto(t)) {
			err = btf_func_proto_check(env, t);
			if (err)
				return err;
		}

		if (btf_type_is_func(t)) {
			err = btf_func_check(env, t);
			if (err)
				return err;
		}
	}

	return 0;
}
3207
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07003208static int btf_parse_type_sec(struct btf_verifier_env *env)
3209{
Martin KaFai Lauf80442a2018-05-22 14:57:18 -07003210 const struct btf_header *hdr = &env->btf->hdr;
Martin KaFai Laueb3f5952018-04-18 15:55:58 -07003211 int err;
3212
Martin KaFai Lauf80442a2018-05-22 14:57:18 -07003213 /* Type section must align to 4 bytes */
3214 if (hdr->type_off & (sizeof(u32) - 1)) {
3215 btf_verifier_log(env, "Unaligned type_off");
3216 return -EINVAL;
3217 }
3218
3219 if (!hdr->type_len) {
3220 btf_verifier_log(env, "No type found");
3221 return -EINVAL;
3222 }
3223
Martin KaFai Laueb3f5952018-04-18 15:55:58 -07003224 err = btf_check_all_metas(env);
3225 if (err)
3226 return err;
3227
3228 return btf_check_all_types(env);
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07003229}
3230
/* Validate the string section: it must be the last section of the BTF
 * data, non-empty, small enough for BTF_MAX_NAME_OFFSET, and both its
 * first and last byte must be NUL.
 */
static int btf_parse_str_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr;
	struct btf *btf = env->btf;
	const char *start, *end;

	hdr = &btf->hdr;
	start = btf->nohdr_data + hdr->str_off;
	end = start + hdr->str_len;

	if (end != btf->data + btf->data_size) {
		btf_verifier_log(env, "String section is not at the end");
		return -EINVAL;
	}

	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
	    start[0] || end[-1]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}

	btf->strings = start;

	return 0;
}
3256
/* Offsets within struct btf_header of each (off, len) section-info
 * pair, so btf_check_sec_info() can validate all sections generically.
 */
static const size_t btf_sec_info_offset[] = {
	offsetof(struct btf_header, type_off),
	offsetof(struct btf_header, str_off),
};
3261
3262static int btf_sec_info_cmp(const void *a, const void *b)
3263{
3264 const struct btf_sec_info *x = a;
3265 const struct btf_sec_info *y = b;
3266
3267 return (int)(x->off - y->off) ? : (int)(x->len - y->len);
3268}
3269
/* Verify the section layout described by the header: after sorting
 * the known sections by offset, they must tile the post-header data
 * exactly — no gaps, no overlap, no unknown trailing data, and no
 * section extending past the BTF data.
 */
static int btf_check_sec_info(struct btf_verifier_env *env,
			      u32 btf_data_size)
{
	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
	u32 total, expected_total, i;
	const struct btf_header *hdr;
	const struct btf *btf;

	btf = env->btf;
	hdr = &btf->hdr;

	/* Populate the secs from hdr */
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
		secs[i] = *(struct btf_sec_info *)((void *)hdr +
						   btf_sec_info_offset[i]);

	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);

	/* Check for gaps and overlap among sections */
	total = 0;
	expected_total = btf_data_size - hdr->hdr_len;
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
		if (expected_total < secs[i].off) {
			btf_verifier_log(env, "Invalid section offset");
			return -EINVAL;
		}
		if (total < secs[i].off) {
			/* gap */
			btf_verifier_log(env, "Unsupported section found");
			return -EINVAL;
		}
		if (total > secs[i].off) {
			btf_verifier_log(env, "Section overlap found");
			return -EINVAL;
		}
		if (expected_total - total < secs[i].len) {
			btf_verifier_log(env,
					 "Total section length too long");
			return -EINVAL;
		}
		total += secs[i].len;
	}

	/* There is data other than hdr and known sections */
	if (expected_total != total) {
		btf_verifier_log(env, "Unsupported section found");
		return -EINVAL;
	}

	return 0;
}
3322
/* Validate the btf_header in btf->data and copy it into btf->hdr.
 * A header longer than this kernel knows about is accepted only if
 * every extra byte is zero (forward compatibility); magic, version
 * and flags are then checked, followed by the section layout.
 */
static int btf_parse_hdr(struct btf_verifier_env *env)
{
	u32 hdr_len, hdr_copy, btf_data_size;
	const struct btf_header *hdr;
	struct btf *btf;
	int err;

	btf = env->btf;
	btf_data_size = btf->data_size;

	/* need at least enough bytes to read hdr_len itself */
	if (btf_data_size <
	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
		btf_verifier_log(env, "hdr_len not found");
		return -EINVAL;
	}

	hdr = btf->data;
	hdr_len = hdr->hdr_len;
	if (btf_data_size < hdr_len) {
		btf_verifier_log(env, "btf_header not found");
		return -EINVAL;
	}

	/* Ensure the unsupported header fields are zero */
	if (hdr_len > sizeof(btf->hdr)) {
		u8 *expected_zero = btf->data + sizeof(btf->hdr);
		u8 *end = btf->data + hdr_len;

		for (; expected_zero < end; expected_zero++) {
			if (*expected_zero) {
				btf_verifier_log(env, "Unsupported btf_header");
				return -E2BIG;
			}
		}
	}

	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
	memcpy(&btf->hdr, btf->data, hdr_copy);

	hdr = &btf->hdr;

	btf_verifier_log_hdr(env, btf_data_size);

	if (hdr->magic != BTF_MAGIC) {
		btf_verifier_log(env, "Invalid magic");
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		btf_verifier_log(env, "Unsupported version");
		return -ENOTSUPP;
	}

	if (hdr->flags) {
		btf_verifier_log(env, "Unsupported flags");
		return -ENOTSUPP;
	}

	/* header alone with no sections is rejected */
	if (btf_data_size == hdr->hdr_len) {
		btf_verifier_log(env, "No data");
		return -EINVAL;
	}

	err = btf_check_sec_info(env, btf_data_size);
	if (err)
		return err;

	return 0;
}
3392
/* Copy BTF data from userspace, verify it (header, string section,
 * type section) and return a refcounted struct btf, or ERR_PTR on
 * failure.  If a log buffer is supplied, the verifier trace is
 * written to it; all three log parameters must then be sane.
 */
static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
			     u32 log_level, char __user *log_ubuf, u32 log_size)
{
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	struct btf *btf = NULL;
	u8 *data;
	int err;

	if (btf_data_size > BTF_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	log = &env->log;
	if (log_level || log_ubuf || log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = log_level;
		log->ubuf = log_ubuf;
		log->len_total = log_size;

		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf) {
			err = -EINVAL;
			goto errout;
		}
	}

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
	if (!data) {
		err = -ENOMEM;
		goto errout;
	}

	btf->data = data;
	btf->data_size = btf_data_size;

	if (copy_from_user(data, btf_data, btf_data_size)) {
		err = -EFAULT;
		goto errout;
	}

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_parse_type_sec(env);
	if (err)
		goto errout;

	/* a full log buffer means the trace was truncated */
	if (log->level && bpf_verifier_log_full(log)) {
		err = -ENOSPC;
		goto errout;
	}

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	if (btf)
		btf_free(btf);
	return ERR_PTR(err);
}
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07003476
Alexei Starovoitov8580ac92019-10-15 20:24:57 -07003477extern char __weak _binary__btf_vmlinux_bin_start[];
3478extern char __weak _binary__btf_vmlinux_bin_end[];
Alexei Starovoitov91cc1a92019-11-14 10:57:15 -08003479extern struct btf *btf_vmlinux;
3480
/* BPF_MAP_TYPE() is defined empty so that including bpf_types.h below
 * expands only its BPF_PROG_TYPE() entries.
 */
#define BPF_MAP_TYPE(_id, _ops)
/* struct bpf_ctx_convert lists, for every program type, the
 * user-visible ctx struct (prog_ctx_type) immediately followed by the
 * in-kernel ctx struct (kern_ctx_type), so the vmlinux BTF of this
 * struct can be used to translate between the two.
 */
static union {
	struct bpf_ctx_convert {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	prog_ctx_type _id##_prog; \
	kern_ctx_type _id##_kern;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
	} *__t;
	/* 't' is written once under lock. Read many times. */
	const struct btf_type *t;
} bpf_ctx_convert;
enum {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	__ctx_convert##_id,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
	__ctx_convert_unused, /* to avoid empty enum in extreme .config */
};
/* maps a bpf_prog_type to its slot index in bpf_ctx_convert */
static u8 bpf_ctx_convert_map[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = __ctx_convert##_id,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
	0, /* avoid empty array */
};
#undef BPF_MAP_TYPE
3508
/* Given a prog's ctx argument type 't' (must be a pointer to a named
 * struct), return the btf_member of struct bpf_ctx_convert describing
 * the user-visible ctx struct for @prog_type, or NULL if the struct
 * name does not match what the kernel expects.
 */
static const struct btf_member *
btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
		      const struct btf_type *t, enum bpf_prog_type prog_type)
{
	const struct btf_type *conv_struct;
	const struct btf_type *ctx_struct;
	const struct btf_member *ctx_type;
	const char *tname, *ctx_tname;

	conv_struct = bpf_ctx_convert.t;
	if (!conv_struct) {
		bpf_log(log, "btf_vmlinux is malformed\n");
		return NULL;
	}
	/* strip the pointer and any modifiers to reach the struct */
	t = btf_type_by_id(btf, t->type);
	while (btf_type_is_modifier(t))
		t = btf_type_by_id(btf, t->type);
	if (!btf_type_is_struct(t)) {
		/* Only pointer to struct is supported for now.
		 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
		 * is not supported yet.
		 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
		 */
		bpf_log(log, "BPF program ctx type is not a struct\n");
		return NULL;
	}
	tname = btf_name_by_offset(btf, t->name_off);
	if (!tname) {
		bpf_log(log, "BPF program ctx struct doesn't have a name\n");
		return NULL;
	}
	/* prog_type is valid bpf program type. No need for bounds check. */
	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
	/* ctx_struct is a pointer to prog_ctx_type in vmlinux.
	 * Like 'struct __sk_buff'
	 */
	ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
	if (!ctx_struct)
		/* should not happen */
		return NULL;
	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
	if (!ctx_tname) {
		/* should not happen */
		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
		return NULL;
	}
	/* only compare that prog's ctx type name is the same as
	 * kernel expects. No need to compare field by field.
	 * It's ok for bpf prog to do:
	 * struct __sk_buff {};
	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
	 * { // no fields of skb are ever used }
	 */
	if (strcmp(ctx_tname, tname))
		return NULL;
	return ctx_type;
}
Alexei Starovoitov8580ac92019-10-15 20:24:57 -07003566
Alexei Starovoitov5b92a282019-11-14 10:57:17 -08003567static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
3568 struct btf *btf,
3569 const struct btf_type *t,
3570 enum bpf_prog_type prog_type)
3571{
3572 const struct btf_member *prog_ctx_type, *kern_ctx_type;
3573
3574 prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type);
3575 if (!prog_ctx_type)
3576 return -ENOENT;
3577 kern_ctx_type = prog_ctx_type + 1;
3578 return kern_ctx_type->type;
3579}
3580
/* Parse the BTF blob for the running kernel that was linked into the
 * image between _binary__btf_vmlinux_bin_start/_end.
 *
 * On success returns a refcounted struct btf (refcnt == 1) with
 * bpf_ctx_convert.t cached for later ctx-type checking; on failure
 * returns an ERR_PTR.  Runs under bpf_verifier_lock (see comment below).
 */
struct btf *btf_parse_vmlinux(void)
{
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	struct btf *btf = NULL;
	int err, i;

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	/* log at KERNEL level: messages go to the kernel log, not a user buf */
	log = &env->log;
	log->level = BPF_LOG_KERNEL;

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	/* data points at the linker-embedded blob; it is never freed */
	btf->data = _binary__btf_vmlinux_bin_start;
	btf->data_size = _binary__btf_vmlinux_bin_end -
		_binary__btf_vmlinux_bin_start;

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_check_all_metas(env);
	if (err)
		goto errout;

	/* find struct bpf_ctx_convert for type checking later */
	for (i = 1; i <= btf->nr_types; i++) {
		const struct btf_type *t;
		const char *tname;

		t = btf_type_by_id(btf, i);
		if (!__btf_type_is_struct(t))
			continue;
		tname = __btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, "bpf_ctx_convert")) {
			/* btf_parse_vmlinux() runs under bpf_verifier_lock */
			bpf_ctx_convert.t = t;
			break;
		}
	}
	/* loop ran to completion without a break: struct not found */
	if (i > btf->nr_types) {
		err = -ENOENT;
		goto errout;
	}

	bpf_struct_ops_init(btf);

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	if (btf) {
		kvfree(btf->types);
		kfree(btf);
	}
	return ERR_PTR(err);
}
3654
Alexei Starovoitov5b92a282019-11-14 10:57:17 -08003655struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
3656{
3657 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
3658
3659 if (tgt_prog) {
3660 return tgt_prog->aux->btf;
3661 } else {
3662 return btf_vmlinux;
3663 }
3664}
3665
/* Validate a BPF-side load from the ctx of a BTF-typed program
 * (fentry/fexit/raw_tp with BTF, etc).
 *
 * The ctx is modeled as an array of 8-byte slots, one per argument of
 * the attach function's prototype ('off' must be 8-byte aligned); for
 * BPF_TRACE_FEXIT the slot one past the last argument holds the return
 * value.  Scalar and void* slots are plain reads; a pointer to a struct
 * becomes PTR_TO_BTF_ID with info->btf_id filled in.
 *
 * Returns true if the access is allowed, false otherwise (details in log).
 */
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info)
{
	const struct btf_type *t = prog->aux->attach_func_proto;
	struct bpf_prog *tgt_prog = prog->aux->linked_prog;
	struct btf *btf = bpf_prog_get_target_btf(prog);
	const char *tname = prog->aux->attach_func_name;
	struct bpf_verifier_log *log = info->log;
	const struct btf_param *args;
	u32 nr_args, arg;
	int ret;

	if (off % 8) {
		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
			tname, off);
		return false;
	}
	/* each argument occupies one 8-byte ctx slot */
	arg = off / 8;
	args = (const struct btf_param *)(t + 1);
	/* if (t == NULL) Fall back to default BPF prog with 5 u64 arguments */
	nr_args = t ? btf_type_vlen(t) : 5;
	if (prog->aux->attach_btf_trace) {
		/* skip first 'void *__data' argument in btf_trace_##name typedef */
		args++;
		nr_args--;
	}

	if (prog->expected_attach_type == BPF_TRACE_FEXIT &&
	    arg == nr_args) {
		if (!t)
			/* Default prog with 5 args. 6th arg is retval. */
			return true;
		/* function return type */
		t = btf_type_by_id(btf, t->type);
	} else if (arg >= nr_args) {
		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
			tname, arg + 1);
		return false;
	} else {
		if (!t)
			/* Default prog with 5 args */
			return true;
		t = btf_type_by_id(btf, args[arg].type);
	}
	/* skip modifiers */
	while (btf_type_is_modifier(t))
		t = btf_type_by_id(btf, t->type);
	if (btf_type_is_int(t) || btf_type_is_enum(t))
		/* accessing a scalar */
		return true;
	if (!btf_type_is_ptr(t)) {
		bpf_log(log,
			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
			tname, arg,
			__btf_name_by_offset(btf, t->name_off),
			btf_kind_str[BTF_INFO_KIND(t->info)]);
		return false;
	}
	if (t->type == 0)
		/* This is a pointer to void.
		 * It is the same as scalar from the verifier safety pov.
		 * No further pointer walking is allowed.
		 */
		return true;

	/* this is a pointer to another type */
	info->reg_type = PTR_TO_BTF_ID;

	if (tgt_prog) {
		/* attaching to another BPF prog: translate its ctx type
		 * to the kernel-side struct's btf_id in vmlinux BTF
		 */
		ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type);
		if (ret > 0) {
			info->btf_id = ret;
			return true;
		} else {
			return false;
		}
	}

	info->btf_id = t->type;
	t = btf_type_by_id(btf, t->type);
	/* skip modifiers; keep btf_id tracking the last id on the chain */
	while (btf_type_is_modifier(t)) {
		info->btf_id = t->type;
		t = btf_type_by_id(btf, t->type);
	}
	if (!btf_type_is_struct(t)) {
		bpf_log(log,
			"func '%s' arg%d type %s is not a struct\n",
			tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]);
		return false;
	}
	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
		tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)],
		__btf_name_by_offset(btf, t->name_off));
	return true;
}
3763
/* Walk struct type 't' (in vmlinux BTF) to validate an access of 'size'
 * bytes at byte offset 'off'.
 *
 * Recurses (via the 'again' label) into nested structs/unions and
 * linearized array members.  Returns:
 *   SCALAR_VALUE   - access lands on scalar data (incl. int-array scratch
 *                    space and fully-covered bitfields),
 *   PTR_TO_BTF_ID  - access reads a pointer-to-struct member exactly;
 *                    *next_btf_id is set to the pointee's btf_id,
 *   -EACCES/-EINVAL/-EFAULT on invalid accesses (details in log).
 */
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id)
{
	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
	const struct btf_type *mtype, *elem_type = NULL;
	const struct btf_member *member;
	const char *tname, *mname;

again:
	tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
	if (!btf_type_is_struct(t)) {
		bpf_log(log, "Type '%s' is not a struct\n", tname);
		return -EINVAL;
	}

	if (off + size > t->size) {
		bpf_log(log, "access beyond struct %s at off %u size %u\n",
			tname, off, size);
		return -EACCES;
	}

	for_each_member(i, t, member) {
		/* offset of the field in bytes */
		moff = btf_member_bit_offset(t, member) / 8;
		if (off + size <= moff)
			/* won't find anything, field is already too far */
			break;

		if (btf_member_bitfield_size(t, member)) {
			u32 end_bit = btf_member_bit_offset(t, member) +
				btf_member_bitfield_size(t, member);

			/* off <= moff instead of off == moff because clang
			 * does not generate a BTF member for anonymous
			 * bitfield like the ":16" here:
			 * struct {
			 *	int :16;
			 *	int x:8;
			 * };
			 */
			if (off <= moff &&
			    BITS_ROUNDUP_BYTES(end_bit) <= off + size)
				return SCALAR_VALUE;

			/* off may be accessing a following member
			 *
			 * or
			 *
			 * Doing partial access at either end of this
			 * bitfield. Continue on this case also to
			 * treat it as not accessing this bitfield
			 * and eventually error out as field not
			 * found to keep it simple.
			 * It could be relaxed if there was a legit
			 * partial access case later.
			 */
			continue;
		}

		/* In case of "off" is pointing to holes of a struct */
		if (off < moff)
			break;

		/* type of the field */
		mtype = btf_type_by_id(btf_vmlinux, member->type);
		mname = __btf_name_by_offset(btf_vmlinux, member->name_off);

		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
					 &elem_type, &total_nelems);
		if (IS_ERR(mtype)) {
			bpf_log(log, "field %s doesn't have size\n", mname);
			return -EFAULT;
		}

		mtrue_end = moff + msize;
		if (off >= mtrue_end)
			/* no overlap with member, keep iterating */
			continue;

		if (btf_type_is_array(mtype)) {
			u32 elem_idx;

			/* btf_resolve_size() above helps to
			 * linearize a multi-dimensional array.
			 *
			 * The logic here is treating an array
			 * in a struct as the following way:
			 *
			 * struct outer {
			 *	struct inner array[2][2];
			 * };
			 *
			 * looks like:
			 *
			 * struct outer {
			 *	struct inner array_elem0;
			 *	struct inner array_elem1;
			 *	struct inner array_elem2;
			 *	struct inner array_elem3;
			 * };
			 *
			 * When accessing outer->array[1][0], it moves
			 * moff to "array_elem2", set mtype to
			 * "struct inner", and msize also becomes
			 * sizeof(struct inner). Then most of the
			 * remaining logic will fall through without
			 * caring the current member is an array or
			 * not.
			 *
			 * Unlike mtype/msize/moff, mtrue_end does not
			 * change. The naming difference ("_true") tells
			 * that it is not always corresponding to
			 * the current mtype/msize/moff.
			 * It is the true end of the current
			 * member (i.e. array in this case). That
			 * will allow an int array to be accessed like
			 * a scratch space,
			 * i.e. allow access beyond the size of
			 * the array's element as long as it is
			 * within the mtrue_end boundary.
			 */

			/* skip empty array */
			if (moff == mtrue_end)
				continue;

			msize /= total_nelems;
			elem_idx = (off - moff) / msize;
			moff += elem_idx * msize;
			mtype = elem_type;
		}

		/* the 'off' we're looking for is either equal to start
		 * of this field or inside of this struct
		 */
		if (btf_type_is_struct(mtype)) {
			/* our field must be inside that union or struct */
			t = mtype;

			/* adjust offset we're looking for */
			off -= moff;
			goto again;
		}

		if (btf_type_is_ptr(mtype)) {
			const struct btf_type *stype;

			/* pointer members must be read whole and exactly */
			if (msize != size || off != moff) {
				bpf_log(log,
					"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
					mname, moff, tname, off, size);
				return -EACCES;
			}

			stype = btf_type_by_id(btf_vmlinux, mtype->type);
			/* skip modifiers */
			while (btf_type_is_modifier(stype))
				stype = btf_type_by_id(btf_vmlinux, stype->type);
			if (btf_type_is_struct(stype)) {
				*next_btf_id = mtype->type;
				return PTR_TO_BTF_ID;
			}
		}

		/* Allow more flexible access within an int as long as
		 * it is within mtrue_end.
		 * Since mtrue_end could be the end of an array,
		 * that also allows using an array of int as a scratch
		 * space. e.g. skb->cb[].
		 */
		if (off + size > mtrue_end) {
			bpf_log(log,
				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
				mname, mtrue_end, tname, off, size);
			return -EACCES;
		}

		return SCALAR_VALUE;
	}
	bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
	return -EINVAL;
}
3948
Alexei Starovoitov9cc31b32019-11-14 10:57:14 -08003949static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn,
3950 int arg)
Alexei Starovoitova7658e12019-10-15 20:25:04 -07003951{
3952 char fnname[KSYM_SYMBOL_LEN + 4] = "btf_";
3953 const struct btf_param *args;
3954 const struct btf_type *t;
3955 const char *tname, *sym;
3956 u32 btf_id, i;
3957
3958 if (IS_ERR(btf_vmlinux)) {
3959 bpf_log(log, "btf_vmlinux is malformed\n");
3960 return -EINVAL;
3961 }
3962
3963 sym = kallsyms_lookup((long)fn, NULL, NULL, NULL, fnname + 4);
3964 if (!sym) {
3965 bpf_log(log, "kernel doesn't have kallsyms\n");
3966 return -EFAULT;
3967 }
3968
3969 for (i = 1; i <= btf_vmlinux->nr_types; i++) {
3970 t = btf_type_by_id(btf_vmlinux, i);
3971 if (BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF)
3972 continue;
3973 tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
3974 if (!strcmp(tname, fnname))
3975 break;
3976 }
3977 if (i > btf_vmlinux->nr_types) {
3978 bpf_log(log, "helper %s type is not found\n", fnname);
3979 return -ENOENT;
3980 }
3981
3982 t = btf_type_by_id(btf_vmlinux, t->type);
3983 if (!btf_type_is_ptr(t))
3984 return -EFAULT;
3985 t = btf_type_by_id(btf_vmlinux, t->type);
3986 if (!btf_type_is_func_proto(t))
3987 return -EFAULT;
3988
3989 args = (const struct btf_param *)(t + 1);
3990 if (arg >= btf_type_vlen(t)) {
3991 bpf_log(log, "bpf helper %s doesn't have %d-th argument\n",
3992 fnname, arg);
3993 return -EINVAL;
3994 }
3995
3996 t = btf_type_by_id(btf_vmlinux, args[arg].type);
3997 if (!btf_type_is_ptr(t) || !t->type) {
3998 /* anything but the pointer to struct is a helper config bug */
3999 bpf_log(log, "ARG_PTR_TO_BTF is misconfigured\n");
4000 return -EFAULT;
4001 }
4002 btf_id = t->type;
4003 t = btf_type_by_id(btf_vmlinux, t->type);
4004 /* skip modifiers */
4005 while (btf_type_is_modifier(t)) {
4006 btf_id = t->type;
4007 t = btf_type_by_id(btf_vmlinux, t->type);
4008 }
4009 if (!btf_type_is_struct(t)) {
4010 bpf_log(log, "ARG_PTR_TO_BTF is not a struct\n");
4011 return -EFAULT;
4012 }
4013 bpf_log(log, "helper %s arg%d has btf_id %d struct %s\n", fnname + 4,
4014 arg, btf_id, __btf_name_by_offset(btf_vmlinux, t->name_off));
4015 return btf_id;
4016}
4017
/* Cached front-end for __btf_resolve_helper_id().
 *
 * The resolved btf_id for (fn, arg) is memoized in fn->btf_id[arg];
 * concurrent callers may race the search, but the result is identical,
 * so the cache is accessed with READ_ONCE/WRITE_ONCE without locking.
 *
 * Returns the positive btf_id on success, negative errno otherwise.
 */
int btf_resolve_helper_id(struct bpf_verifier_log *log,
			  const struct bpf_func_proto *fn, int arg)
{
	int *btf_id = &fn->btf_id[arg];
	int ret;

	if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID)
		return -EINVAL;

	/* cache hit: a previous call already resolved (fn, arg) */
	ret = READ_ONCE(*btf_id);
	if (ret)
		return ret;
	/* ok to race the search. The result is the same */
	ret = __btf_resolve_helper_id(log, fn->func, arg);
	if (!ret) {
		/* Function argument cannot be type 'void' */
		bpf_log(log, "BTF resolution bug\n");
		return -EFAULT;
	}
	WRITE_ONCE(*btf_id, ret);
	return ret;
}
4040
Alexei Starovoitovfec56f52019-11-14 10:57:04 -08004041static int __get_type_size(struct btf *btf, u32 btf_id,
4042 const struct btf_type **bad_type)
4043{
4044 const struct btf_type *t;
4045
4046 if (!btf_id)
4047 /* void */
4048 return 0;
4049 t = btf_type_by_id(btf, btf_id);
4050 while (t && btf_type_is_modifier(t))
4051 t = btf_type_by_id(btf, t->type);
Alexei Starovoitovd0f01042019-11-26 15:01:06 -08004052 if (!t) {
4053 *bad_type = btf->types[0];
Alexei Starovoitovfec56f52019-11-14 10:57:04 -08004054 return -EINVAL;
Alexei Starovoitovd0f01042019-11-26 15:01:06 -08004055 }
Alexei Starovoitovfec56f52019-11-14 10:57:04 -08004056 if (btf_type_is_ptr(t))
4057 /* kernel size of pointer. Not BPF's size of pointer*/
4058 return sizeof(void *);
4059 if (btf_type_is_int(t) || btf_type_is_enum(t))
4060 return t->size;
4061 *bad_type = t;
4062 return -EINVAL;
4063}
4064
/* Distill the BTF func_proto 'func' into the simplified model 'm'
 * (number of arguments plus the byte size of each argument and the
 * return value) used by the BPF trampoline code.
 *
 * When 'func' is NULL there is no usable prototype; fall back to the
 * default model of 5 u64 arguments with a u64 return.
 *
 * Returns 0 on success, -EINVAL when the prototype has too many
 * arguments or uses an unsupported (non-scalar, non-pointer) type.
 */
int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func,
			   const char *tname,
			   struct btf_func_model *m)
{
	const struct btf_param *args;
	const struct btf_type *t;
	u32 i, nargs;
	int ret;

	if (!func) {
		/* BTF function prototype doesn't match the verifier types.
		 * Fall back to 5 u64 args.
		 */
		for (i = 0; i < 5; i++)
			m->arg_size[i] = 8;
		m->ret_size = 8;
		m->nr_args = 5;
		return 0;
	}
	args = (const struct btf_param *)(func + 1);
	nargs = btf_type_vlen(func);
	if (nargs >= MAX_BPF_FUNC_ARGS) {
		bpf_log(log,
			"The function %s has %d arguments. Too many.\n",
			tname, nargs);
		return -EINVAL;
	}
	/* on error __get_type_size() points 't' at the unsupported type */
	ret = __get_type_size(btf, func->type, &t);
	if (ret < 0) {
		bpf_log(log,
			"The function %s return type %s is unsupported.\n",
			tname, btf_kind_str[BTF_INFO_KIND(t->info)]);
		return -EINVAL;
	}
	m->ret_size = ret;

	for (i = 0; i < nargs; i++) {
		ret = __get_type_size(btf, args[i].type, &t);
		if (ret < 0) {
			bpf_log(log,
				"The function %s arg%d type %s is unsupported.\n",
				tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
			return -EINVAL;
		}
		m->arg_size[i] = ret;
	}
	m->nr_args = nargs;
	return 0;
}
4116
/* Cross-check the BTF-declared argument types of subprog 'subprog'
 * against the register types the verifier actually sees at the call
 * site (R1..R5 of the current frame).
 *
 * A mismatch does NOT fail verification: LLVM may legally drop or
 * reshuffle arguments of static functions, so the subprog's func_info
 * is merely marked unreliable and 0 is returned.  Only structurally
 * broken BTF (not KIND_FUNC / not a func_proto) returns -EINVAL.
 */
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog)
{
	struct bpf_verifier_state *st = env->cur_state;
	struct bpf_func_state *func = st->frame[st->curframe];
	struct bpf_reg_state *reg = func->regs;
	struct bpf_verifier_log *log = &env->log;
	struct bpf_prog *prog = env->prog;
	struct btf *btf = prog->aux->btf;
	const struct btf_param *args;
	const struct btf_type *t;
	u32 i, nargs, btf_id;
	const char *tname;

	/* nothing to check without func_info or without a type for subprog */
	if (!prog->aux->func_info)
		return 0;

	btf_id = prog->aux->func_info[subprog].type_id;
	if (!btf_id)
		return 0;

	/* already known to disagree with the actual arguments */
	if (prog->aux->func_info_aux[subprog].unreliable)
		return 0;

	t = btf_type_by_id(btf, btf_id);
	if (!t || !btf_type_is_func(t)) {
		bpf_log(log, "BTF of subprog %d doesn't point to KIND_FUNC\n",
			subprog);
		return -EINVAL;
	}
	tname = btf_name_by_offset(btf, t->name_off);

	t = btf_type_by_id(btf, t->type);
	if (!t || !btf_type_is_func_proto(t)) {
		bpf_log(log, "Invalid type of func %s\n", tname);
		return -EINVAL;
	}
	args = (const struct btf_param *)(t + 1);
	nargs = btf_type_vlen(t);
	if (nargs > 5) {
		bpf_log(log, "Function %s has %d > 5 args\n", tname, nargs);
		goto out;
	}
	/* check that BTF function arguments match actual types that the
	 * verifier sees.
	 */
	for (i = 0; i < nargs; i++) {
		/* arg i maps to register R(i+1) */
		t = btf_type_by_id(btf, args[i].type);
		while (btf_type_is_modifier(t))
			t = btf_type_by_id(btf, t->type);
		if (btf_type_is_int(t) || btf_type_is_enum(t)) {
			if (reg[i + 1].type == SCALAR_VALUE)
				continue;
			bpf_log(log, "R%d is not a scalar\n", i + 1);
			goto out;
		}
		if (btf_type_is_ptr(t)) {
			if (reg[i + 1].type == SCALAR_VALUE) {
				bpf_log(log, "R%d is not a pointer\n", i + 1);
				goto out;
			}
			/* If program is passing PTR_TO_CTX into subprogram
			 * check that BTF type matches.
			 */
			if (reg[i + 1].type == PTR_TO_CTX &&
			    !btf_get_prog_ctx_type(log, btf, t, prog->type))
				goto out;
			/* All other pointers are ok */
			continue;
		}
		bpf_log(log, "Unrecognized argument type %s\n",
			btf_kind_str[BTF_INFO_KIND(t->info)]);
		goto out;
	}
	return 0;
out:
	/* LLVM optimizations can remove arguments from static functions. */
	bpf_log(log,
		"Type info disagrees with actual arguments due to compiler optimizations\n");
	prog->aux->func_info_aux[subprog].unreliable = true;
	return 0;
}
4198
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07004199void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
4200 struct seq_file *m)
4201{
4202 const struct btf_type *t = btf_type_by_id(btf, type_id);
4203
4204 btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
4205}
Martin KaFai Lauf56a6532018-04-18 15:56:01 -07004206
#ifdef CONFIG_PROC_FS
/* Emit the BTF object's id into /proc/<pid>/fdinfo/<fd> */
static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct btf *btf = filp->private_data;

	seq_printf(m, "btf_id:\t%u\n", btf->id);
}
#endif
4215
Martin KaFai Lauf56a6532018-04-18 15:56:01 -07004216static int btf_release(struct inode *inode, struct file *filp)
4217{
4218 btf_put(filp->private_data);
4219 return 0;
4220}
4221
/* File operations backing the anonymous BTF fds created by
 * __btf_new_fd(); btf_release() drops the fd's reference.
 */
const struct file_operations btf_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_btf_show_fdinfo,
#endif
	.release	= btf_release,
};
4228
Martin KaFai Lau78958fc2018-05-04 14:49:51 -07004229static int __btf_new_fd(struct btf *btf)
4230{
4231 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
4232}
4233
Martin KaFai Lauf56a6532018-04-18 15:56:01 -07004234int btf_new_fd(const union bpf_attr *attr)
4235{
4236 struct btf *btf;
Martin KaFai Lau78958fc2018-05-04 14:49:51 -07004237 int ret;
Martin KaFai Lauf56a6532018-04-18 15:56:01 -07004238
4239 btf = btf_parse(u64_to_user_ptr(attr->btf),
4240 attr->btf_size, attr->btf_log_level,
4241 u64_to_user_ptr(attr->btf_log_buf),
4242 attr->btf_log_size);
4243 if (IS_ERR(btf))
4244 return PTR_ERR(btf);
4245
Martin KaFai Lau78958fc2018-05-04 14:49:51 -07004246 ret = btf_alloc_id(btf);
4247 if (ret) {
4248 btf_free(btf);
4249 return ret;
4250 }
4251
4252 /*
4253 * The BTF ID is published to the userspace.
4254 * All BTF free must go through call_rcu() from
4255 * now on (i.e. free by calling btf_put()).
4256 */
4257
4258 ret = __btf_new_fd(btf);
4259 if (ret < 0)
Martin KaFai Lauf56a6532018-04-18 15:56:01 -07004260 btf_put(btf);
4261
Martin KaFai Lau78958fc2018-05-04 14:49:51 -07004262 return ret;
Martin KaFai Lauf56a6532018-04-18 15:56:01 -07004263}
4264
4265struct btf *btf_get_by_fd(int fd)
4266{
4267 struct btf *btf;
4268 struct fd f;
4269
4270 f = fdget(fd);
4271
4272 if (!f.file)
4273 return ERR_PTR(-EBADF);
4274
4275 if (f.file->f_op != &btf_fops) {
4276 fdput(f);
4277 return ERR_PTR(-EINVAL);
4278 }
4279
4280 btf = f.file->private_data;
Martin KaFai Lau78958fc2018-05-04 14:49:51 -07004281 refcount_inc(&btf->refcnt);
Martin KaFai Lauf56a6532018-04-18 15:56:01 -07004282 fdput(f);
4283
4284 return btf;
4285}
Martin KaFai Lau60197cf2018-04-18 15:56:02 -07004286
/* Handler for BPF_OBJ_GET_INFO_BY_FD on a BTF fd: fill the user's
 * struct bpf_btf_info (id, raw BTF data, sizes).
 *
 * Copies at most info.btf_size bytes of raw BTF into the user buffer
 * and reports the full size back in info.btf_size so userspace can
 * retry with a large enough buffer.  Returns 0 or -EFAULT.
 */
int btf_get_info_by_fd(const struct btf *btf,
		       const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo;
	struct bpf_btf_info info = {};
	u32 info_copy, btf_copy;
	void __user *ubtf;
	u32 uinfo_len;

	uinfo = u64_to_user_ptr(attr->info.info);
	uinfo_len = attr->info.info_len;

	/* tolerate older/smaller userspace struct layouts */
	info_copy = min_t(u32, uinfo_len, sizeof(info));
	if (copy_from_user(&info, uinfo, info_copy))
		return -EFAULT;

	info.id = btf->id;
	ubtf = u64_to_user_ptr(info.btf);
	btf_copy = min_t(u32, btf->data_size, info.btf_size);
	if (copy_to_user(ubtf, btf->data, btf_copy))
		return -EFAULT;
	info.btf_size = btf->data_size;

	if (copy_to_user(uinfo, &info, info_copy) ||
	    put_user(info_copy, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}
Martin KaFai Lau78958fc2018-05-04 14:49:51 -07004317
/* Handler for BPF_BTF_GET_FD_BY_ID: look up the BTF object with the
 * given id in btf_idr and return a new fd referencing it.
 *
 * The idr lookup plus refcount_inc_not_zero() under rcu_read_lock()
 * guards against the object being freed concurrently.  Returns the fd
 * on success, -ENOENT if no live object has that id, or the negative
 * errno from fd creation.
 */
int btf_get_fd_by_id(u32 id)
{
	struct btf *btf;
	int fd;

	rcu_read_lock();
	btf = idr_find(&btf_idr, id);
	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
		btf = ERR_PTR(-ENOENT);
	rcu_read_unlock();

	if (IS_ERR(btf))
		return PTR_ERR(btf);

	fd = __btf_new_fd(btf);
	if (fd < 0)
		/* fd creation failed: drop the reference taken above */
		btf_put(btf);

	return fd;
}
4338
/* Return the id of 'btf' (the same id exposed via fdinfo and
 * struct bpf_btf_info).
 */
u32 btf_id(const struct btf *btf)
{
	return btf->id;
}