/* Common BPF helpers to be used by all BPF programs loaded by Android */

#include <linux/bpf.h>
#include <stdbool.h>
#include <stdint.h>

#include "bpf_map_def.h"

/******************************************************************************
 * WARNING: CHANGES TO THIS FILE OUTSIDE OF AOSP/MASTER ARE LIKELY TO BREAK  *
 * DEVICE COMPATIBILITY WITH MAINLINE MODULES SHIPPING EBPF CODE.            *
 *                                                                           *
 * THIS WILL LIKELY RESULT IN BRICKED DEVICES AT SOME ARBITRARY FUTURE TIME  *
 *                                                                           *
 * THAT GOES ESPECIALLY FOR THE 'SECTION' 'LICENSE' AND 'CRITICAL' MACROS    *
 *                                                                           *
 * We strongly suggest that if you need changes to bpfloader functionality   *
 * you get your changes reviewed and accepted into aosp/master.              *
 *                                                                           *
 ******************************************************************************/

// The actual versions of the bpfloader that shipped in various Android releases

// Android P/Q/R: BpfLoader was initially part of netd,
// this was later split out into a standalone binary, but was unversioned.

// Android S / 12 (api level 31) - added 'tethering' mainline eBPF support
#define BPFLOADER_S_VERSION 2u

// Android T / 13 (api level 33) - support for shared/selinux_context/pindir
#define BPFLOADER_T_VERSION 19u

// BpfLoader v0.25+ supports obj@ver.o files
#define BPFLOADER_OBJ_AT_VER_VERSION 25u

// BpfLoader v0.33+ supports {map,prog}.ignore_on_{eng,user,userdebug}
#define BPFLOADER_IGNORED_ON_VERSION 33u

// Android U / 14 (api level 34) - various new program types added
#define BPFLOADER_U_VERSION 38u

// Android U QPR2 / 14 (api level 34) - platform only
// (note: the platform bpfloader in V isn't really versioned at all,
// as there is no need, since it can only load objects compiled at the
// same time as itself and the rest of the platform)
#define BPFLOADER_U_QPR2_VERSION 41u
#define BPFLOADER_PLATFORM_VERSION BPFLOADER_U_QPR2_VERSION

// Android Mainline - this bpfloader should eventually go back to T (or even S)
// Note: this value (and the following +1u's) are hardcoded in NetBpfLoad.cpp
#define BPFLOADER_MAINLINE_VERSION 42u

// Android Mainline BpfLoader when running on Android T
#define BPFLOADER_MAINLINE_T_VERSION (BPFLOADER_MAINLINE_VERSION + 1u)

// Android Mainline BpfLoader when running on Android U
#define BPFLOADER_MAINLINE_U_VERSION (BPFLOADER_MAINLINE_T_VERSION + 1u)

// Android Mainline BpfLoader when running on Android U QPR3
#define BPFLOADER_MAINLINE_U_QPR3_VERSION (BPFLOADER_MAINLINE_U_VERSION + 1u)

// Android Mainline BpfLoader when running on Android V
#define BPFLOADER_MAINLINE_V_VERSION (BPFLOADER_MAINLINE_U_QPR3_VERSION + 1u)

/* For mainline module use, you can #define BPFLOADER_{MIN/MAX}_VER
 * before #include "bpf_helpers.h" to change which bpfloaders will
 * process the resulting .o file.
 *
 * While this works outside of mainline too, there is little point in using it
 * when the .o and the bpfloader ship in sync with each other, in which case
 * it is best to simply use the default.
 */
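/* For example, a mainline-shipped .c file that must remain loadable by the
 * mainline BpfLoader running on Android T or newer might do the following
 * (a sketch only; the value is chosen purely for illustration):
 *
 *   #define BPFLOADER_MIN_VER BPFLOADER_MAINLINE_T_VERSION
 *   #include "bpf_helpers.h"
 */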
#ifndef BPFLOADER_MIN_VER
#define BPFLOADER_MIN_VER BPFLOADER_PLATFORM_VERSION
#endif

#ifndef BPFLOADER_MAX_VER
#define BPFLOADER_MAX_VER DEFAULT_BPFLOADER_MAX_VER
#endif

/* place things in different elf sections */
#define SECTION(NAME) __attribute__((section(NAME), used))

/* Must be present in every program, example usage:
 *   LICENSE("GPL"); or LICENSE("Apache 2.0");
 *
 * We also take this opportunity to embed a bunch of other useful values in
 * the resulting .o (this is to enable some limited forward compatibility
 * with mainline module shipped ebpf programs).
 *
 * The bpfloader_{min/max}_ver defines the [min, max) range of bpfloader
 * versions that should load this .o file (bpfloaders outside of this range
 * will simply ignore/skip this *entire* .o).
 * The [inclusive, exclusive) range matches what we do for kernel ver dependencies.
 *
 * The size_of_bpf_{map,prog}_def values allow the bpfloader to load programs
 * where these structures have been extended with additional fields (which
 * will then of course simply be ignored).
 *
 * If missing, bpfloader_{min/max}_ver default to 0/0x10000, ie. [v0.0, v1.0),
 * while size_of_bpf_{map/prog}_def default to 32/20, which are the v0.0 sizes.
 */
#define LICENSE(NAME)                                                                           \
    unsigned int _bpfloader_min_ver SECTION("bpfloader_min_ver") = BPFLOADER_MIN_VER;           \
    unsigned int _bpfloader_max_ver SECTION("bpfloader_max_ver") = BPFLOADER_MAX_VER;           \
    size_t _size_of_bpf_map_def SECTION("size_of_bpf_map_def") = sizeof(struct bpf_map_def);    \
    size_t _size_of_bpf_prog_def SECTION("size_of_bpf_prog_def") = sizeof(struct bpf_prog_def); \
    char _license[] SECTION("license") = (NAME)

/* This macro disables loading BTF map debug information on Android <=U *and* all user builds.
 *
 * Note: BpfLoader v0.39+ honours 'btf_user_min_bpfloader_ver' on user builds,
 * and 'btf_min_bpfloader_ver' on non-user builds.
 * Older BTF capable versions unconditionally honour 'btf_min_bpfloader_ver'.
 */
#define DISABLE_BTF_ON_USER_BUILDS()                                            \
    unsigned _btf_min_bpfloader_ver SECTION("btf_min_bpfloader_ver") = 39u;     \
    unsigned _btf_user_min_bpfloader_ver SECTION("btf_user_min_bpfloader_ver") = 0xFFFFFFFFu

#define DISABLE_ON_MAINLINE_BEFORE_U_QPR3() \
    unsigned _netbpfload_min_ver SECTION("netbpfload_min_ver") = BPFLOADER_MAINLINE_U_QPR3_VERSION;

/* Flags the resulting bpf .o file as critical to system functionality:
 * loading of all kernel version appropriate programs in it must succeed
 * for the bpfloader to succeed.
 */
#define CRITICAL(REASON) char _critical[] SECTION("critical") = (REASON)

/*
 * Helper functions called from eBPF programs written in C. These are
 * implemented in the kernel sources.
 */

struct kver_uint { unsigned int kver; };
#define KVER_(v) ((struct kver_uint){ .kver = (v) })
#define KVER(a, b, c) KVER_(((a) << 24) + ((b) << 16) + (c))
#define KVER_NONE KVER_(0)
#define KVER_4_14 KVER(4, 14, 0)
#define KVER_4_19 KVER(4, 19, 0)
#define KVER_5_4 KVER(5, 4, 0)
#define KVER_5_8 KVER(5, 8, 0)
#define KVER_5_9 KVER(5, 9, 0)
#define KVER_5_10 KVER(5, 10, 0)
#define KVER_5_15 KVER(5, 15, 0)
#define KVER_6_1 KVER(6, 1, 0)
#define KVER_6_6 KVER(6, 6, 0)
#define KVER_INF KVER_(0xFFFFFFFFu)

#define KVER_IS_AT_LEAST(kver, a, b, c) ((kver).kver >= KVER(a, b, c).kver)
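/* A minimal usage sketch (hypothetical helper, illustration only): pass a
 * compile time constant kernel version into an always_inline helper, so that
 * KVER_IS_AT_LEAST() folds to a constant and dead branches are compiled away:
 *
 *   static inline __always_inline bool needs_workaround(const struct kver_uint kver) {
 *       return !KVER_IS_AT_LEAST(kver, 5, 10, 0);
 *   }
 *
 * called with eg. KVER_5_15 or KVER_NONE, depending on which kernel range the
 * enclosing program is being compiled for.
 */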
/*
 * BPFFS (ie. /sys/fs/bpf) labelling is as follows:
 *   subdirectory    selinux context       mainline  usecase / usable by
 *   /               fs_bpf                no [*]    core operating system (ie. platform)
 *   /loader         fs_bpf_loader         no, U+    (as yet unused)
 *   /net_private    fs_bpf_net_private    yes, T+   network_stack
 *   /net_shared     fs_bpf_net_shared     yes, T+   network_stack & system_server
 *   /netd_readonly  fs_bpf_netd_readonly  yes, T+   network_stack & system_server & r/o to netd
 *   /netd_shared    fs_bpf_netd_shared    yes, T+   network_stack & system_server & netd [**]
 *   /tethering      fs_bpf_tethering      yes, S+   network_stack
 *   /vendor         fs_bpf_vendor         no, T+    vendor
 *
 * [*] initial support for bpf was added back in P,
 *     but things worked differently back then: there was no bpfloader,
 *     and netd did the work by hand instead;
 *     a bpfloader with pinning into /sys/fs/bpf was (I believe) added in Q
 *     (and was definitely there in R).
 *
 * [**] additionally, bpf programs are accessible to netutils_wrapper
 *      for use by iptables xt_bpf extensions.
 *
 * See cs/p:aosp-master%20-file:prebuilts/%20file:genfs_contexts%20"genfscon%20bpf"
 */

/* generic functions */

/*
 * Type-unsafe bpf map functions - avoid if possible.
 *
 * Using these, it is possible to pass in keys/values of the wrong type/size,
 * or, for 'bpf_map_lookup_elem_unsafe', to receive into a pointer to the wrong type.
 * You will not get a compile time failure, and for certain types of errors you
 * might not even get a failure from the kernel's ebpf verifier during program load;
 * instead things may simply misbehave at runtime.
 *
 * Instead please use:
 *   DEFINE_BPF_MAP(foo_map, TYPE, KeyType, ValueType, num_entries)
 * where TYPE can be something like HASH or ARRAY, and num_entries is an integer.
 *
 * This defines the map (hence this should not be used in a header file included
 * from multiple locations) and provides type safe accessors:
 *   ValueType * bpf_foo_map_lookup_elem(const KeyType *)
 *   int bpf_foo_map_update_elem(const KeyType *, const ValueType *, flags)
 *   int bpf_foo_map_delete_elem(const KeyType *)
 *
 * This will make sure that if you change the type of a map you'll get compile
 * errors at any spots you forget to update with the new type.
 *
 * Note: these all take pointers to a const map because, from the C/eBPF point of view,
 * the map struct is really just a readonly map definition of the in kernel object.
 * Runtime modification of the map defining struct is meaningless, since
 * its contents are only ever used during bpf program loading & map creation
 * by the bpf loader, and not by the eBPF program itself.
 */
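/* For instance (hypothetical map, a usage sketch only):
 *
 *   DEFINE_BPF_MAP(example_stats_map, HASH, uint32_t, uint64_t, 16)
 *
 * generates the map definition plus the typed accessors described above,
 * which program code can then call as:
 *
 *   uint64_t* value = bpf_example_stats_map_lookup_elem(&key);
 *   (void)bpf_example_stats_map_update_elem(&key, &newValue, BPF_ANY);
 *   (void)bpf_example_stats_map_delete_elem(&key);
 */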
static void* (*bpf_map_lookup_elem_unsafe)(const struct bpf_map_def* map,
                                           const void* key) = (void*)BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem_unsafe)(const struct bpf_map_def* map, const void* key,
                                         const void* value, unsigned long long flags) = (void*)
        BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem_unsafe)(const struct bpf_map_def* map,
                                         const void* key) = (void*)BPF_FUNC_map_delete_elem;
static int (*bpf_ringbuf_output_unsafe)(const struct bpf_map_def* ringbuf,
                                        const void* data, __u64 size, __u64 flags) = (void*)
        BPF_FUNC_ringbuf_output;
static void* (*bpf_ringbuf_reserve_unsafe)(const struct bpf_map_def* ringbuf,
                                           __u64 size, __u64 flags) = (void*)
        BPF_FUNC_ringbuf_reserve;
static void (*bpf_ringbuf_submit_unsafe)(const void* data, __u64 flags) = (void*)
        BPF_FUNC_ringbuf_submit;

#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)          \
    struct ____btf_map_##name {                                 \
        type_key key;                                           \
        type_val value;                                         \
    };                                                          \
    struct ____btf_map_##name                                   \
            __attribute__((section(".maps." #name), used))      \
            ____btf_map_##name = { }

#define BPF_ASSERT_LOADER_VERSION(min_loader, ignore_eng, ignore_user, ignore_userdebug) \
    _Static_assert(                                                                      \
            (min_loader) >= BPFLOADER_IGNORED_ON_VERSION ||                              \
                    !((ignore_eng).ignore_on_eng ||                                      \
                      (ignore_user).ignore_on_user ||                                    \
                      (ignore_userdebug).ignore_on_userdebug),                           \
            "bpfloader min version must be >= 0.33 in order to use ignored_on");

#define DEFINE_BPF_MAP_BASE(the_map, TYPE, keysize, valuesize, num_entries, \
                            usr, grp, md, selinux, pindir, share, minkver,  \
                            maxkver, minloader, maxloader, ignore_eng,      \
                            ignore_user, ignore_userdebug)                  \
    const struct bpf_map_def SECTION("maps") the_map = {                    \
            .type = BPF_MAP_TYPE_##TYPE,                                    \
            .key_size = (keysize),                                          \
            .value_size = (valuesize),                                      \
            .max_entries = (num_entries),                                   \
            .map_flags = 0,                                                 \
            .uid = (usr),                                                   \
            .gid = (grp),                                                   \
            .mode = (md),                                                   \
            .bpfloader_min_ver = (minloader),                               \
            .bpfloader_max_ver = (maxloader),                               \
            .min_kver = (minkver).kver,                                     \
            .max_kver = (maxkver).kver,                                     \
            .selinux_context = (selinux),                                   \
            .pin_subdir = (pindir),                                         \
            .shared = (share).shared,                                       \
            .ignore_on_eng = (ignore_eng).ignore_on_eng,                    \
            .ignore_on_user = (ignore_user).ignore_on_user,                 \
            .ignore_on_userdebug = (ignore_userdebug).ignore_on_userdebug,  \
    };                                                                      \
    BPF_ASSERT_LOADER_VERSION(minloader, ignore_eng, ignore_user, ignore_userdebug);

// Type safe macro to declare a ring buffer and related output functions.
// Compatibility:
// * BPF ring buffers are only available on kernels 5.8 and above. Any program
//   accessing the ring buffer should set a program level min_kver >= 5.8.
// * The definition below sets a map min_kver of 5.8, which requires targeting
//   a BPFLOADER_MIN_VER >= BPFLOADER_S_VERSION.
#define DEFINE_BPF_RINGBUF_EXT(the_map, ValueType, size_bytes, usr, grp, md,   \
                               selinux, pindir, share, min_loader, max_loader, \
                               ignore_eng, ignore_user, ignore_userdebug)      \
    DEFINE_BPF_MAP_BASE(the_map, RINGBUF, 0, 0, size_bytes, usr, grp, md,      \
                        selinux, pindir, share, KVER_5_8, KVER_INF,            \
                        min_loader, max_loader, ignore_eng, ignore_user,       \
                        ignore_userdebug);                                     \
                                                                               \
    _Static_assert((size_bytes) >= 4096, "min 4 kiB ringbuffer size");         \
    _Static_assert((size_bytes) <= 0x10000000, "max 256 MiB ringbuffer size"); \
    _Static_assert(((size_bytes) & ((size_bytes) - 1)) == 0,                   \
                   "ring buffer size must be a power of two");                 \
                                                                               \
    static inline __always_inline __unused int bpf_##the_map##_output(         \
            const ValueType* v) {                                              \
        return bpf_ringbuf_output_unsafe(&the_map, v, sizeof(*v), 0);          \
    }                                                                          \
                                                                               \
    static inline __always_inline __unused                                     \
            ValueType* bpf_##the_map##_reserve() {                             \
        return bpf_ringbuf_reserve_unsafe(&the_map, sizeof(ValueType), 0);     \
    }                                                                          \
                                                                               \
    static inline __always_inline __unused void bpf_##the_map##_submit(        \
            const ValueType* v) {                                              \
        bpf_ringbuf_submit_unsafe(v, 0);                                       \
    }

/* There exist buggy pre-T device kernels that, due to the
 * kernel patch "[ALPS05162612] bpf: fix ubsan error",
 * do not support userspace writes into a non-zero index of bpf map arrays.
 *
 * This assert prevents us from defining such a map.
 */

#ifdef THIS_BPF_PROGRAM_IS_FOR_TEST_PURPOSES_ONLY
#define BPF_MAP_ASSERT_OK(type, entries, mode)
#elif BPFLOADER_MIN_VER >= BPFLOADER_T_VERSION
#define BPF_MAP_ASSERT_OK(type, entries, mode)
#else
#define BPF_MAP_ASSERT_OK(type, entries, mode)                                             \
    _Static_assert(((type) != BPF_MAP_TYPE_ARRAY) || ((entries) <= 1) || !((mode) & 0222), \
                   "Writable arrays with more than 1 element not supported on pre-T devices.")
#endif

/* type safe macro to declare a map and related accessor functions */
#define DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md,       \
                           selinux, pindir, share, min_loader, max_loader, ignore_eng,         \
                           ignore_user, ignore_userdebug)                                      \
    DEFINE_BPF_MAP_BASE(the_map, TYPE, sizeof(KeyType), sizeof(ValueType),                     \
                        num_entries, usr, grp, md, selinux, pindir, share,                     \
                        KVER_NONE, KVER_INF, min_loader, max_loader,                           \
                        ignore_eng, ignore_user, ignore_userdebug);                            \
    BPF_MAP_ASSERT_OK(BPF_MAP_TYPE_##TYPE, (num_entries), (md));                               \
    _Static_assert(sizeof(KeyType) < 1024, "aosp/2370288 requires < 1024 byte keys");          \
    _Static_assert(sizeof(ValueType) < 65536, "aosp/2370288 requires < 65536 byte values");    \
    BPF_ANNOTATE_KV_PAIR(the_map, KeyType, ValueType);                                         \
                                                                                               \
    static inline __always_inline __unused ValueType* bpf_##the_map##_lookup_elem(             \
            const KeyType* k) {                                                                \
        return bpf_map_lookup_elem_unsafe(&the_map, k);                                        \
    };                                                                                         \
                                                                                               \
    static inline __always_inline __unused int bpf_##the_map##_update_elem(                    \
            const KeyType* k, const ValueType* v, unsigned long long flags) {                  \
        return bpf_map_update_elem_unsafe(&the_map, k, v, flags);                              \
    };                                                                                         \
                                                                                               \
    static inline __always_inline __unused int bpf_##the_map##_delete_elem(const KeyType* k) { \
        return bpf_map_delete_elem_unsafe(&the_map, k);                                        \
    };

#ifndef DEFAULT_BPF_MAP_SELINUX_CONTEXT
#define DEFAULT_BPF_MAP_SELINUX_CONTEXT ""
#endif

#ifndef DEFAULT_BPF_MAP_PIN_SUBDIR
#define DEFAULT_BPF_MAP_PIN_SUBDIR ""
#endif
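/* A usage sketch for the typed ring buffer accessors generated by
 * DEFINE_BPF_RINGBUF_EXT above (hypothetical names, illustration only):
 *
 *   DEFINE_BPF_RINGBUF_EXT(example_events_ringbuf, struct example_event, 4096,
 *                          AID_ROOT, AID_ROOT, 0660, "", "", PRIVATE,
 *                          BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,
 *                          LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
 *
 * and then, from program code:
 *
 *   struct example_event* e = bpf_example_events_ringbuf_reserve();
 *   if (e) {
 *       e->some_field = 42;
 *       bpf_example_events_ringbuf_submit(e);
 *   }
 */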
#ifndef DEFAULT_BPF_MAP_UID
#define DEFAULT_BPF_MAP_UID AID_ROOT
#elif BPFLOADER_MIN_VER < 28u
#error "Bpf Map UID must be left at default of AID_ROOT for BpfLoader prior to v0.28"
#endif

#define DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md)      \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md,          \
                       DEFAULT_BPF_MAP_SELINUX_CONTEXT, DEFAULT_BPF_MAP_PIN_SUBDIR, PRIVATE,  \
                       BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, LOAD_ON_ENG,                     \
                       LOAD_ON_USER, LOAD_ON_USERDEBUG)

#define DEFINE_BPF_MAP(the_map, TYPE, KeyType, ValueType, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, \
                       DEFAULT_BPF_MAP_UID, AID_ROOT, 0600)

#define DEFINE_BPF_MAP_RO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries,         \
                       DEFAULT_BPF_MAP_UID, gid, 0440)

#define DEFINE_BPF_MAP_GWO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries,          \
                       DEFAULT_BPF_MAP_UID, gid, 0620)

#define DEFINE_BPF_MAP_GRO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries,          \
                       DEFAULT_BPF_MAP_UID, gid, 0640)

#define DEFINE_BPF_MAP_GRW(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries,          \
                       DEFAULT_BPF_MAP_UID, gid, 0660)
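/* For example (hypothetical map name and group, a sketch only), a single-entry
 * array readable and writable by its owning group:
 *
 *   DEFINE_BPF_MAP_GRW(example_config_map, ARRAY, uint32_t, uint32_t, 1, AID_SYSTEM)
 *
 * ie. uid DEFAULT_BPF_MAP_UID (normally AID_ROOT), gid AID_SYSTEM, mode 0660.
 */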
// LLVM eBPF builtins: they directly generate BPF_LD_ABS/BPF_LD_IND (skb may be ignored?)
unsigned long long load_byte(void* skb, unsigned long long off) asm("llvm.bpf.load.byte");
unsigned long long load_half(void* skb, unsigned long long off) asm("llvm.bpf.load.half");
unsigned long long load_word(void* skb, unsigned long long off) asm("llvm.bpf.load.word");

static int (*bpf_probe_read)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read;
static int (*bpf_probe_read_str)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_str;
static int (*bpf_probe_read_user)(void* dst, int size, const void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_user;
static int (*bpf_probe_read_user_str)(void* dst, int size, const void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_user_str;
static unsigned long long (*bpf_ktime_get_ns)(void) = (void*) BPF_FUNC_ktime_get_ns;
static unsigned long long (*bpf_ktime_get_boot_ns)(void) = (void*) BPF_FUNC_ktime_get_boot_ns;
static int (*bpf_trace_printk)(const char* fmt, int fmt_size, ...) = (void*) BPF_FUNC_trace_printk;
static unsigned long long (*bpf_get_current_pid_tgid)(void) = (void*) BPF_FUNC_get_current_pid_tgid;
static unsigned long long (*bpf_get_current_uid_gid)(void) = (void*) BPF_FUNC_get_current_uid_gid;
static unsigned long long (*bpf_get_smp_processor_id)(void) = (void*) BPF_FUNC_get_smp_processor_id;
static long (*bpf_get_stackid)(void* ctx, void* map, uint64_t flags) = (void*) BPF_FUNC_get_stackid;
static long (*bpf_get_current_comm)(void* buf, uint32_t buf_size) = (void*) BPF_FUNC_get_current_comm;

#define DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                            min_loader, max_loader, opt, selinux, pindir, ignore_eng,   \
                            ignore_user, ignore_userdebug)                              \
    const struct bpf_prog_def SECTION("progs") the_prog##_def = {                       \
            .uid = (prog_uid),                                                          \
            .gid = (prog_gid),                                                          \
            .min_kver = (min_kv).kver,                                                  \
            .max_kver = (max_kv).kver,                                                  \
            .optional = (opt).optional,                                                 \
            .bpfloader_min_ver = (min_loader),                                          \
            .bpfloader_max_ver = (max_loader),                                          \
            .selinux_context = (selinux),                                               \
            .pin_subdir = (pindir),                                                     \
            .ignore_on_eng = (ignore_eng).ignore_on_eng,                                \
            .ignore_on_user = (ignore_user).ignore_on_user,                             \
            .ignore_on_userdebug = (ignore_userdebug).ignore_on_userdebug,              \
    };                                                                                  \
    SECTION(SECTION_NAME)                                                               \
    int the_prog

#ifndef DEFAULT_BPF_PROG_SELINUX_CONTEXT
#define DEFAULT_BPF_PROG_SELINUX_CONTEXT ""
#endif

#ifndef DEFAULT_BPF_PROG_PIN_SUBDIR
#define DEFAULT_BPF_PROG_PIN_SUBDIR ""
#endif

#define DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                                       opt)                                                        \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv,                \
                        BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, opt,                                 \
                        DEFAULT_BPF_PROG_SELINUX_CONTEXT, DEFAULT_BPF_PROG_PIN_SUBDIR,             \
                        LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

// Programs (here used in the sense of functions/sections) marked optional are allowed to fail
// to load (for example due to missing kernel patches).
// The bpfloader will simply ignore these failures and continue processing the next section.
//
// A non-optional program (function/section) failing to load causes a failure and aborts
// processing of the entire .o. If the .o is additionally marked critical, this results in
// the entire bpfloader process terminating with a failure and not setting the bpf.progs_loaded
// system property, which in turn results in waitForProgsLoaded() never finishing.
//
// ie. a non-optional program in a critical .o is mandatory for kernels matching the min/max kver.
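/* For instance (hypothetical programs, a sketch only), a feature relying on a
 * helper only present in 5.8+ kernels could be declared optional for new kernels,
 * with a mandatory fallback covering older kernels, using the convenience macros
 * defined just below:
 *
 *   DEFINE_OPTIONAL_BPF_PROG_KVER("schedcls/example_new", AID_ROOT, AID_ROOT,
 *                                 sched_cls_example_new, KVER_5_8)
 *   (struct __sk_buff* skb) {
 *       return 0;  // implementation using newer kernel functionality
 *   }
 *
 *   DEFINE_BPF_PROG_KVER_RANGE("schedcls/example_old", AID_ROOT, AID_ROOT,
 *                              sched_cls_example_old, KVER_NONE, KVER_5_8)
 *   (struct __sk_buff* skb) {
 *       return 0;  // fallback implementation for pre-5.8 kernels
 *   }
 */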
// programs requiring a kernel version >= min_kv && < max_kv
#define DEFINE_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                                   MANDATORY)
#define DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, \
                                            max_kv)                                            \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                                   OPTIONAL)

// programs requiring a kernel version >= min_kv
#define DEFINE_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv)                 \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF, \
                                   MANDATORY)
#define DEFINE_OPTIONAL_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv)        \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF, \
                                   OPTIONAL)

// programs with no kernel version requirements
#define DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog)                                 \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
                                   MANDATORY)
#define DEFINE_OPTIONAL_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog)                        \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
                                   OPTIONAL)
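/* Putting it together, a minimal .c file built against this header might look
 * like the following sketch (hypothetical names; the return value convention
 * depends on the program type):
 *
 *   #include "bpf_helpers.h"
 *
 *   DEFINE_BPF_MAP_GRW(example_packet_count_map, ARRAY, uint32_t, uint64_t, 1, AID_SYSTEM)
 *
 *   DEFINE_BPF_PROG("skfilter/example", AID_ROOT, AID_SYSTEM, example_prog)
 *   (struct __sk_buff* skb) {
 *       uint32_t key = 0;
 *       uint64_t* count = bpf_example_packet_count_map_lookup_elem(&key);
 *       if (count) __sync_fetch_and_add(count, 1);
 *       return skb->len;
 *   }
 *
 *   LICENSE("Apache 2.0");
 */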