1 /*
2 * Copyright (C) 2018-2023 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "NetBpfLoad"
18
19 #include <errno.h>
20 #include <fcntl.h>
21 #include <linux/bpf.h>
22 #include <linux/elf.h>
23 #include <log/log.h>
24 #include <stdint.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <sysexits.h>
29 #include <sys/stat.h>
30 #include <sys/utsname.h>
31 #include <sys/wait.h>
32 #include <unistd.h>
33
34 #include "BpfSyscallWrappers.h"
35 #include "bpf/BpfUtils.h"
36 #include "bpf/bpf_map_def.h"
37 #include "loader.h"
38
39 #include <cstdlib>
40 #include <fstream>
41 #include <iostream>
42 #include <optional>
43 #include <string>
44 #include <unordered_map>
45 #include <vector>
46
47 #include <android-base/cmsg.h>
48 #include <android-base/file.h>
49 #include <android-base/properties.h>
50 #include <android-base/strings.h>
51 #include <android-base/unique_fd.h>
52
53 #define BPF_FS_PATH "/sys/fs/bpf/"
54
55 // Size of the BPF log buffer for verifier logging
56 #define BPF_LOAD_LOG_SZ 0xfffff
57
58 // Unspecified attach type is 0 which is BPF_CGROUP_INET_INGRESS.
59 #define BPF_ATTACH_TYPE_UNSPEC BPF_CGROUP_INET_INGRESS
60
61 using android::base::StartsWith;
62 using android::base::unique_fd;
63 using std::ifstream;
64 using std::ios;
65 using std::optional;
66 using std::string;
67 using std::vector;
68
69 namespace android {
70 namespace bpf {
71
72 const std::string& getBuildType() {
73 static std::string t = android::base::GetProperty("ro.build.type", "unknown");
74 return t;
75 }
76
77 static unsigned int page_size = static_cast<unsigned int>(getpagesize());
78
79 constexpr const char* lookupSelinuxContext(const domain d, const char* const unspecified = "") {
80 switch (d) {
81 case domain::unspecified: return unspecified;
82 case domain::tethering: return "fs_bpf_tethering";
83 case domain::net_private: return "fs_bpf_net_private";
84 case domain::net_shared: return "fs_bpf_net_shared";
85 case domain::netd_readonly: return "fs_bpf_netd_readonly";
86 case domain::netd_shared: return "fs_bpf_netd_shared";
87 default: return "(unrecognized)";
88 }
89 }
90
91 domain getDomainFromSelinuxContext(const char s[BPF_SELINUX_CONTEXT_CHAR_ARRAY_SIZE]) {
92 for (domain d : AllDomains) {
93 // Not sure how to enforce this at compile time, so abort() bpfloader at boot instead
94 if (strlen(lookupSelinuxContext(d)) >= BPF_SELINUX_CONTEXT_CHAR_ARRAY_SIZE) abort();
95 if (!strncmp(s, lookupSelinuxContext(d), BPF_SELINUX_CONTEXT_CHAR_ARRAY_SIZE)) return d;
96 }
97 ALOGW("ignoring unrecognized selinux_context '%-32s'", s);
98 // We should return 'unrecognized' here, however: returning unspecified will
99 // result in the system simply using the default context, which in turn
100 // will allow future expansion by adding more restrictive selinux types.
101 // Older bpfloader will simply ignore that, and use the less restrictive default.
102 // This does mean you CANNOT later add a *less* restrictive type than the default.
103 //
104 // Note: we cannot just abort() here as this might be a mainline module shipped optional update
105 return domain::unspecified;
106 }
107
108 constexpr const char* lookupPinSubdir(const domain d, const char* const unspecified = "") {
109 switch (d) {
110 case domain::unspecified: return unspecified;
111 case domain::tethering: return "tethering/";
112 case domain::net_private: return "net_private/";
113 case domain::net_shared: return "net_shared/";
114 case domain::netd_readonly: return "netd_readonly/";
115 case domain::netd_shared: return "netd_shared/";
116 default: return "(unrecognized)";
117 }
118 };
119
120 domain getDomainFromPinSubdir(const char s[BPF_PIN_SUBDIR_CHAR_ARRAY_SIZE]) {
121 for (domain d : AllDomains) {
122 // Not sure how to enforce this at compile time, so abort() bpfloader at boot instead
123 if (strlen(lookupPinSubdir(d)) >= BPF_PIN_SUBDIR_CHAR_ARRAY_SIZE) abort();
124 if (!strncmp(s, lookupPinSubdir(d), BPF_PIN_SUBDIR_CHAR_ARRAY_SIZE)) return d;
125 }
126 ALOGE("unrecognized pin_subdir '%-32s'", s);
127 // pin_subdir affects the object's full pathname,
128 // and thus using the default would change the location and thus our code's ability to find it,
129 // hence this seems worth treating as a true error condition.
130 //
131 // Note: we cannot just abort() here as this might be a mainline module shipped optional update
132 // However, our callers will treat this as an error, and stop loading the specific .o,
133 // which will fail bpfloader if the .o is marked critical.
134 return domain::unrecognized;
135 }
136
137 static string pathToObjName(const string& path) {
138 // extract everything after the final slash, ie. this is the filename 'foo@1.o' or 'bar.o'
139 string filename = android::base::Split(path, "/").back();
140 // strip off everything from the final period onwards (strip '.o' suffix), ie. 'foo@1' or 'bar'
141 string name = filename.substr(0, filename.find_last_of('.'));
142 // strip any potential @1 suffix, this will leave us with just 'foo' or 'bar'
143 // this can be used to provide duplicate programs (mux based on the bpfloader version)
144 return name.substr(0, name.find_last_of('@'));
145 }
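// Worked examples of the transform above (paths purely illustrative):
//   pathToObjName("/apex/com.android.tethering/etc/bpf/offload@btf.o") == "offload"
//   pathToObjName("/etc/bpf/clatd.o") == "clatd"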
146
147 typedef struct {
148 const char* name;
149 enum bpf_prog_type type;
150 enum bpf_attach_type expected_attach_type;
151 } sectionType;
152
153 /*
154 * Map section name prefixes to program types, the section name will be:
155 * SECTION(<prefix>/<name-of-program>)
156 * For example:
157 * SECTION("tracepoint/sched_switch_func") where sched_switch_func
158 * is the name of the program, and tracepoint is the type.
159 *
160 * However, be aware that you should not be directly using the SECTION() macro.
161 * Instead use the DEFINE_(BPF|XDP)_(PROG|MAP)... & LICENSE/CRITICAL macros.
162 *
163 * Programs shipped inside the tethering apex should be limited to networking stuff,
164 * as KPROBE, PERF_EVENT, TRACEPOINT are dangerous to use from mainline updatable code,
165 * since they are less stable abi/api and may conflict with platform uses of bpf.
166 */
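/*
 * For instance (illustrative only, assuming the DEFINE_BPF_PROG(section, uid, gid, name)
 * form from bpf_helpers.h), a bpf .c file might contain:
 *
 *   DEFINE_BPF_PROG("schedcls/example_prog", AID_ROOT, AID_SYSTEM, sched_cls_example_prog)
 *   (struct __sk_buff* skb) {
 *       return TC_ACT_PIPE;
 *   }
 *
 * which places the program in ELF section "schedcls/example_prog", so the table
 * below loads it as BPF_PROG_TYPE_SCHED_CLS.
 */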
167 sectionType sectionNameTypes[] = {
168 {"bind4/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND},
169 {"bind6/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND},
170 {"cgroupskb/", BPF_PROG_TYPE_CGROUP_SKB, BPF_ATTACH_TYPE_UNSPEC},
171 {"cgroupsock/", BPF_PROG_TYPE_CGROUP_SOCK, BPF_ATTACH_TYPE_UNSPEC},
172 {"cgroupsockcreate/", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE},
173 {"cgroupsockrelease/", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE},
174 {"connect4/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT},
175 {"connect6/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT},
176 {"egress/", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_EGRESS},
177 {"getsockopt/", BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT},
178 {"ingress/", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_INGRESS},
179 {"lwt_in/", BPF_PROG_TYPE_LWT_IN, BPF_ATTACH_TYPE_UNSPEC},
180 {"lwt_out/", BPF_PROG_TYPE_LWT_OUT, BPF_ATTACH_TYPE_UNSPEC},
181 {"lwt_seg6local/", BPF_PROG_TYPE_LWT_SEG6LOCAL, BPF_ATTACH_TYPE_UNSPEC},
182 {"lwt_xmit/", BPF_PROG_TYPE_LWT_XMIT, BPF_ATTACH_TYPE_UNSPEC},
183 {"postbind4/", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND},
184 {"postbind6/", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND},
185 {"recvmsg4/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG},
186 {"recvmsg6/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG},
187 {"schedact/", BPF_PROG_TYPE_SCHED_ACT, BPF_ATTACH_TYPE_UNSPEC},
188 {"schedcls/", BPF_PROG_TYPE_SCHED_CLS, BPF_ATTACH_TYPE_UNSPEC},
189 {"sendmsg4/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG},
190 {"sendmsg6/", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG},
191 {"setsockopt/", BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT},
192 {"skfilter/", BPF_PROG_TYPE_SOCKET_FILTER, BPF_ATTACH_TYPE_UNSPEC},
193 {"sockops/", BPF_PROG_TYPE_SOCK_OPS, BPF_CGROUP_SOCK_OPS},
194 {"sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL},
195 {"xdp/", BPF_PROG_TYPE_XDP, BPF_ATTACH_TYPE_UNSPEC},
196 };
197
198 typedef struct {
199 enum bpf_prog_type type;
200 enum bpf_attach_type expected_attach_type;
201 string name;
202 vector<char> data;
203 vector<char> rel_data;
204 optional<struct bpf_prog_def> prog_def;
205
206 unique_fd prog_fd; /* fd after loading */
207 } codeSection;
208
209 static int readElfHeader(ifstream& elfFile, Elf64_Ehdr* eh) {
210 elfFile.seekg(0);
211 if (elfFile.fail()) return -1;
212
213 if (!elfFile.read((char*)eh, sizeof(*eh))) return -1;
214
215 return 0;
216 }
217
218 /* Reads all section header tables into an Shdr array */
219 static int readSectionHeadersAll(ifstream& elfFile, vector<Elf64_Shdr>& shTable) {
220 Elf64_Ehdr eh;
221 int ret = 0;
222
223 ret = readElfHeader(elfFile, &eh);
224 if (ret) return ret;
225
226 elfFile.seekg(eh.e_shoff);
227 if (elfFile.fail()) return -1;
228
229 /* Read shdr table entries */
230 shTable.resize(eh.e_shnum);
231
232 if (!elfFile.read((char*)shTable.data(), (eh.e_shnum * eh.e_shentsize))) return -ENOMEM;
233
234 return 0;
235 }
236
237 /* Read a section by its index - for ex to get sec hdr strtab blob */
238 static int readSectionByIdx(ifstream& elfFile, int id, vector<char>& sec) {
239 vector<Elf64_Shdr> shTable;
240 int ret = readSectionHeadersAll(elfFile, shTable);
241 if (ret) return ret;
242
243 elfFile.seekg(shTable[id].sh_offset);
244 if (elfFile.fail()) return -1;
245
246 sec.resize(shTable[id].sh_size);
247 if (!elfFile.read(sec.data(), shTable[id].sh_size)) return -1;
248
249 return 0;
250 }
251
252 /* Read whole section header string table */
253 static int readSectionHeaderStrtab(ifstream& elfFile, vector<char>& strtab) {
254 Elf64_Ehdr eh;
255 int ret = readElfHeader(elfFile, &eh);
256 if (ret) return ret;
257
258 ret = readSectionByIdx(elfFile, eh.e_shstrndx, strtab);
259 if (ret) return ret;
260
261 return 0;
262 }
263
264 /* Get name from offset in strtab */
265 static int getSymName(ifstream& elfFile, int nameOff, string& name) {
266 int ret;
267 vector<char> secStrTab;
268
269 ret = readSectionHeaderStrtab(elfFile, secStrTab);
270 if (ret) return ret;
271
272 if (nameOff >= (int)secStrTab.size()) return -1;
273
274 name = string((char*)secStrTab.data() + nameOff);
275 return 0;
276 }
277
278 /* Reads a full section by name - example to get the GPL license */
279 static int readSectionByName(const char* name, ifstream& elfFile, vector<char>& data) {
280 vector<char> secStrTab;
281 vector<Elf64_Shdr> shTable;
282 int ret;
283
284 ret = readSectionHeadersAll(elfFile, shTable);
285 if (ret) return ret;
286
287 ret = readSectionHeaderStrtab(elfFile, secStrTab);
288 if (ret) return ret;
289
290 for (int i = 0; i < (int)shTable.size(); i++) {
291 char* secname = secStrTab.data() + shTable[i].sh_name;
292 if (!secname) continue;
293
294 if (!strcmp(secname, name)) {
295 vector<char> dataTmp;
296 dataTmp.resize(shTable[i].sh_size);
297
298 elfFile.seekg(shTable[i].sh_offset);
299 if (elfFile.fail()) return -1;
300
301 if (!elfFile.read((char*)dataTmp.data(), shTable[i].sh_size)) return -1;
302
303 data = dataTmp;
304 return 0;
305 }
306 }
307 return -2;
308 }
309
310 unsigned int readSectionUint(const char* name, ifstream& elfFile, unsigned int defVal) {
311 vector<char> theBytes;
312 int ret = readSectionByName(name, elfFile, theBytes);
313 if (ret) {
314 ALOGD("Couldn't find section %s (defaulting to %u [0x%x]).", name, defVal, defVal);
315 return defVal;
316 } else if (theBytes.size() < sizeof(unsigned int)) {
317 ALOGE("Section %s too short (defaulting to %u [0x%x]).", name, defVal, defVal);
318 return defVal;
319 } else {
320 // decode first 4 bytes as LE32 uint, there will likely be more bytes due to alignment.
321 unsigned int value = static_cast<unsigned char>(theBytes[3]);
322 value <<= 8;
323 value += static_cast<unsigned char>(theBytes[2]);
324 value <<= 8;
325 value += static_cast<unsigned char>(theBytes[1]);
326 value <<= 8;
327 value += static_cast<unsigned char>(theBytes[0]);
328 ALOGI("Section %s value is %u [0x%x]", name, value, value);
329 return value;
330 }
331 }
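// Worked example of the little-endian decode above (bytes purely illustrative):
// a section whose first four bytes are { 0x13, 0x00, 0x01, 0x00 } decodes to
// (0x00 << 24) | (0x01 << 16) | (0x00 << 8) | 0x13 == 0x10013.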
332
333 static int readSectionByType(ifstream& elfFile, int type, vector<char>& data) {
334 int ret;
335 vector<Elf64_Shdr> shTable;
336
337 ret = readSectionHeadersAll(elfFile, shTable);
338 if (ret) return ret;
339
340 for (int i = 0; i < (int)shTable.size(); i++) {
341 if ((int)shTable[i].sh_type != type) continue;
342
343 vector<char> dataTmp;
344 dataTmp.resize(shTable[i].sh_size);
345
346 elfFile.seekg(shTable[i].sh_offset);
347 if (elfFile.fail()) return -1;
348
349 if (!elfFile.read((char*)dataTmp.data(), shTable[i].sh_size)) return -1;
350
351 data = dataTmp;
352 return 0;
353 }
354 return -2;
355 }
356
357 static bool symCompare(Elf64_Sym a, Elf64_Sym b) {
358 return (a.st_value < b.st_value);
359 }
360
361 static int readSymTab(ifstream& elfFile, int sort, vector<Elf64_Sym>& data) {
362 int ret, numElems;
363 Elf64_Sym* buf;
364 vector<char> secData;
365
366 ret = readSectionByType(elfFile, SHT_SYMTAB, secData);
367 if (ret) return ret;
368
369 buf = (Elf64_Sym*)secData.data();
370 numElems = (secData.size() / sizeof(Elf64_Sym));
371 data.assign(buf, buf + numElems);
372
373 if (sort) std::sort(data.begin(), data.end(), symCompare);
374 return 0;
375 }
376
377 static enum bpf_prog_type getSectionType(string& name) {
378 for (auto& snt : sectionNameTypes)
379 if (StartsWith(name, snt.name)) return snt.type;
380
381 return BPF_PROG_TYPE_UNSPEC;
382 }
383
384 static enum bpf_attach_type getExpectedAttachType(string& name) {
385 for (auto& snt : sectionNameTypes)
386 if (StartsWith(name, snt.name)) return snt.expected_attach_type;
387 return BPF_ATTACH_TYPE_UNSPEC;
388 }
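/*
 * Illustrative lookups against the sectionNameTypes table (program names hypothetical):
 *   getSectionType("ingress/example")        -> BPF_PROG_TYPE_CGROUP_SKB
 *   getExpectedAttachType("ingress/example") -> BPF_CGROUP_INET_INGRESS
 *   getSectionType("bogus/example")          -> BPF_PROG_TYPE_UNSPEC (section is skipped)
 */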
389
390 /*
391 static string getSectionName(enum bpf_prog_type type)
392 {
393 for (auto& snt : sectionNameTypes)
394 if (snt.type == type)
395 return string(snt.name);
396
397 return "UNKNOWN SECTION NAME " + std::to_string(type);
398 }
399 */
400
401 static int readProgDefs(ifstream& elfFile, vector<struct bpf_prog_def>& pd,
402 size_t sizeOfBpfProgDef) {
403 vector<char> pdData;
404 int ret = readSectionByName("progs", elfFile, pdData);
405 if (ret) return ret;
406
407 if (pdData.size() % sizeOfBpfProgDef) {
408 ALOGE("readProgDefs failed due to improper sized progs section, %zu %% %zu != 0",
409 pdData.size(), sizeOfBpfProgDef);
410 return -1;
411 };
412
413 int progCount = pdData.size() / sizeOfBpfProgDef;
414 pd.resize(progCount);
415 size_t trimmedSize = std::min(sizeOfBpfProgDef, sizeof(struct bpf_prog_def));
416
417 const char* dataPtr = pdData.data();
418 for (auto& p : pd) {
419 // First we zero initialize
420 memset(&p, 0, sizeof(p));
421 // Then we set non-zero defaults
422 p.bpfloader_max_ver = DEFAULT_BPFLOADER_MAX_VER; // v1.0
423 // Then we copy over the structure prefix from the ELF file.
424 memcpy(&p, dataPtr, trimmedSize);
425 // Move to next struct in the ELF file
426 dataPtr += sizeOfBpfProgDef;
427 }
428 return 0;
429 }
430
431 static int getSectionSymNames(ifstream& elfFile, const string& sectionName, vector<string>& names,
432 optional<unsigned> symbolType = std::nullopt) {
433 int ret;
434 string name;
435 vector<Elf64_Sym> symtab;
436 vector<Elf64_Shdr> shTable;
437
438 ret = readSymTab(elfFile, 1 /* sort */, symtab);
439 if (ret) return ret;
440
441 /* Get index of section */
442 ret = readSectionHeadersAll(elfFile, shTable);
443 if (ret) return ret;
444
445 int sec_idx = -1;
446 for (int i = 0; i < (int)shTable.size(); i++) {
447 ret = getSymName(elfFile, shTable[i].sh_name, name);
448 if (ret) return ret;
449
450 if (!name.compare(sectionName)) {
451 sec_idx = i;
452 break;
453 }
454 }
455
456 /* No section found with matching name */
457 if (sec_idx == -1) {
458 ALOGW("No %s section could be found in elf object", sectionName.c_str());
459 return -1;
460 }
461
462 for (int i = 0; i < (int)symtab.size(); i++) {
463 if (symbolType.has_value() && ELF_ST_TYPE(symtab[i].st_info) != symbolType) continue;
464
465 if (symtab[i].st_shndx == sec_idx) {
466 string s;
467 ret = getSymName(elfFile, symtab[i].st_name, s);
468 if (ret) return ret;
469 names.push_back(s);
470 }
471 }
472
473 return 0;
474 }
475
476 /* Read a section by its index - for ex to get sec hdr strtab blob */
477 static int readCodeSections(ifstream& elfFile, vector<codeSection>& cs, size_t sizeOfBpfProgDef) {
478 vector<Elf64_Shdr> shTable;
479 int entries, ret = 0;
480
481 ret = readSectionHeadersAll(elfFile, shTable);
482 if (ret) return ret;
483 entries = shTable.size();
484
485 vector<struct bpf_prog_def> pd;
486 ret = readProgDefs(elfFile, pd, sizeOfBpfProgDef);
487 if (ret) return ret;
488 vector<string> progDefNames;
489 ret = getSectionSymNames(elfFile, "progs", progDefNames);
490 if (!pd.empty() && ret) return ret;
491
492 for (int i = 0; i < entries; i++) {
493 string name;
494 codeSection cs_temp;
495 cs_temp.type = BPF_PROG_TYPE_UNSPEC;
496
497 ret = getSymName(elfFile, shTable[i].sh_name, name);
498 if (ret) return ret;
499
500 enum bpf_prog_type ptype = getSectionType(name);
501
502 if (ptype == BPF_PROG_TYPE_UNSPEC) continue;
503
504 // This must be done before '/' is replaced with '_'.
505 cs_temp.expected_attach_type = getExpectedAttachType(name);
506
507 string oldName = name;
508
509 // convert all slashes to underscores
510 std::replace(name.begin(), name.end(), '/', '_');
511
512 cs_temp.type = ptype;
513 cs_temp.name = name;
514
515 ret = readSectionByIdx(elfFile, i, cs_temp.data);
516 if (ret) return ret;
517 ALOGV("Loaded code section %d (%s)", i, name.c_str());
518
519 vector<string> csSymNames;
520 ret = getSectionSymNames(elfFile, oldName, csSymNames, STT_FUNC);
521 if (ret || !csSymNames.size()) return ret;
522 for (size_t i = 0; i < progDefNames.size(); ++i) {
523 if (!progDefNames[i].compare(csSymNames[0] + "_def")) {
524 cs_temp.prog_def = pd[i];
525 break;
526 }
527 }
528
529 /* Check for rel section */
530 if (cs_temp.data.size() > 0 && i + 1 < entries) {
531 ret = getSymName(elfFile, shTable[i + 1].sh_name, name);
532 if (ret) return ret;
533
534 if (name == (".rel" + oldName)) {
535 ret = readSectionByIdx(elfFile, i + 1, cs_temp.rel_data);
536 if (ret) return ret;
537 ALOGV("Loaded relo section %d (%s)", i, name.c_str());
538 }
539 }
540
541 if (cs_temp.data.size() > 0) {
542 cs.push_back(std::move(cs_temp));
543 ALOGV("Adding section %d to cs list", i);
544 }
545 }
546 return 0;
547 }
548
549 static int getSymNameByIdx(ifstream& elfFile, int index, string& name) {
550 vector<Elf64_Sym> symtab;
551 int ret = 0;
552
553 ret = readSymTab(elfFile, 0 /* !sort */, symtab);
554 if (ret) return ret;
555
556 if (index >= (int)symtab.size()) return -1;
557
558 return getSymName(elfFile, symtab[index].st_name, name);
559 }
560
561 static bool mapMatchesExpectations(const unique_fd& fd, const string& mapName,
562 const struct bpf_map_def& mapDef, const enum bpf_map_type type) {
563 // bpfGetFd... family of functions require at minimum a 4.14 kernel,
564 // so on 4.9-T kernels just pretend the map matches our expectations.
565 // Additionally we'll get almost equivalent test coverage on newer devices/kernels.
566 // This is because the primary failure mode we're trying to detect here
567 // is either a source code misconfiguration (which is likely kernel independent)
568 // or a newly introduced kernel feature/bug (which is unlikely to get backported to 4.9).
569 if (!isAtLeastKernelVersion(4, 14, 0)) return true;
570
571 // Assuming fd is a valid Bpf Map file descriptor then
572 // all the following should always succeed on a 4.14+ kernel.
573 // If they somehow do fail, they'll return -1 (and set errno),
574 // which should then cause (among others) a key_size mismatch.
575 int fd_type = bpfGetFdMapType(fd);
576 int fd_key_size = bpfGetFdKeySize(fd);
577 int fd_value_size = bpfGetFdValueSize(fd);
578 int fd_max_entries = bpfGetFdMaxEntries(fd);
579 int fd_map_flags = bpfGetFdMapFlags(fd);
580
581 // DEVMAPs are readonly from the bpf program side's point of view, as such
582 // the kernel in kernel/bpf/devmap.c dev_map_init_map() will set the flag
583 int desired_map_flags = (int)mapDef.map_flags;
584 if (type == BPF_MAP_TYPE_DEVMAP || type == BPF_MAP_TYPE_DEVMAP_HASH)
585 desired_map_flags |= BPF_F_RDONLY_PROG;
586
587 // The .h file enforces that this is a power of two, and page size will
588 // also always be a power of two, so this logic is actually enough to
589 // force it to be a multiple of the page size, as required by the kernel.
590 unsigned int desired_max_entries = mapDef.max_entries;
591 if (type == BPF_MAP_TYPE_RINGBUF) {
592 if (desired_max_entries < page_size) desired_max_entries = page_size;
593 }
594
595 // The following checks should *never* trigger, if one of them somehow does,
596 // it probably means a bpf .o file has been changed/replaced at runtime
597 // and bpfloader was manually rerun (normally it should only run *once*
598 // early during the boot process).
599 // Another possibility is that something is misconfigured in the code:
600 // most likely a shared map is declared twice differently.
601 // But such a change should never be checked into the source tree...
602 if ((fd_type == type) &&
603 (fd_key_size == (int)mapDef.key_size) &&
604 (fd_value_size == (int)mapDef.value_size) &&
605 (fd_max_entries == (int)desired_max_entries) &&
606 (fd_map_flags == desired_map_flags)) {
607 return true;
608 }
609
610 ALOGE("bpf map name %s mismatch: desired/found: "
611 "type:%d/%d key:%u/%d value:%u/%d entries:%u/%d flags:%u/%d",
612 mapName.c_str(), type, fd_type, mapDef.key_size, fd_key_size, mapDef.value_size,
613 fd_value_size, mapDef.max_entries, fd_max_entries, desired_map_flags, fd_map_flags);
614 return false;
615 }
616
617 static int createMaps(const char* elfPath, ifstream& elfFile, vector<unique_fd>& mapFds,
618 const char* prefix, const size_t sizeOfBpfMapDef,
619 const unsigned int bpfloader_ver) {
620 int ret;
621 vector<char> mdData;
622 vector<struct bpf_map_def> md;
623 vector<string> mapNames;
624 string objName = pathToObjName(string(elfPath));
625
626 ret = readSectionByName("maps", elfFile, mdData);
627 if (ret == -2) return 0; // no maps to read
628 if (ret) return ret;
629
630 if (mdData.size() % sizeOfBpfMapDef) {
631 ALOGE("createMaps failed due to improper sized maps section, %zu %% %zu != 0",
632 mdData.size(), sizeOfBpfMapDef);
633 return -1;
634 };
635
636 int mapCount = mdData.size() / sizeOfBpfMapDef;
637 md.resize(mapCount);
638 size_t trimmedSize = std::min(sizeOfBpfMapDef, sizeof(struct bpf_map_def));
639
640 const char* dataPtr = mdData.data();
641 for (auto& m : md) {
642 // First we zero initialize
643 memset(&m, 0, sizeof(m));
644 // Then we set non-zero defaults
645 m.bpfloader_max_ver = DEFAULT_BPFLOADER_MAX_VER; // v1.0
646 m.max_kver = 0xFFFFFFFFu; // matches KVER_INF from bpf_helpers.h
647 // Then we copy over the structure prefix from the ELF file.
648 memcpy(&m, dataPtr, trimmedSize);
649 // Move to next struct in the ELF file
650 dataPtr += sizeOfBpfMapDef;
651 }
652
653 ret = getSectionSymNames(elfFile, "maps", mapNames);
654 if (ret) return ret;
655
656 unsigned kvers = kernelVersion();
657
658 for (int i = 0; i < (int)mapNames.size(); i++) {
659 if (md[i].zero != 0) abort();
660
661 if (bpfloader_ver < md[i].bpfloader_min_ver) {
662 ALOGI("skipping map %s which requires bpfloader min ver 0x%05x", mapNames[i].c_str(),
663 md[i].bpfloader_min_ver);
664 mapFds.push_back(unique_fd());
665 continue;
666 }
667
668 if (bpfloader_ver >= md[i].bpfloader_max_ver) {
669 ALOGI("skipping map %s which requires bpfloader max ver 0x%05x", mapNames[i].c_str(),
670 md[i].bpfloader_max_ver);
671 mapFds.push_back(unique_fd());
672 continue;
673 }
674
675 if (kvers < md[i].min_kver) {
676 ALOGI("skipping map %s which requires kernel version 0x%x >= 0x%x",
677 mapNames[i].c_str(), kvers, md[i].min_kver);
678 mapFds.push_back(unique_fd());
679 continue;
680 }
681
682 if (kvers >= md[i].max_kver) {
683 ALOGI("skipping map %s which requires kernel version 0x%x < 0x%x",
684 mapNames[i].c_str(), kvers, md[i].max_kver);
685 mapFds.push_back(unique_fd());
686 continue;
687 }
688
689 if ((md[i].ignore_on_eng && isEng()) || (md[i].ignore_on_user && isUser()) ||
690 (md[i].ignore_on_userdebug && isUserdebug())) {
691 ALOGI("skipping map %s which is ignored on %s builds", mapNames[i].c_str(),
692 getBuildType().c_str());
693 mapFds.push_back(unique_fd());
694 continue;
695 }
696
697 if ((isArm() && isKernel32Bit() && md[i].ignore_on_arm32) ||
698 (isArm() && isKernel64Bit() && md[i].ignore_on_aarch64) ||
699 (isX86() && isKernel32Bit() && md[i].ignore_on_x86_32) ||
700 (isX86() && isKernel64Bit() && md[i].ignore_on_x86_64) ||
701 (isRiscV() && md[i].ignore_on_riscv64)) {
702 ALOGI("skipping map %s which is ignored on %s", mapNames[i].c_str(),
703 describeArch());
704 mapFds.push_back(unique_fd());
705 continue;
706 }
707
708 enum bpf_map_type type = md[i].type;
709 if (type == BPF_MAP_TYPE_DEVMAP && !isAtLeastKernelVersion(4, 14, 0)) {
710 // On Linux Kernels older than 4.14 this map type doesn't exist, but it can kind
711 // of be approximated: ARRAY has the same userspace api, though it is not usable
712 // by the same ebpf programs. However, that's okay because the bpf_redirect_map()
713 // helper doesn't exist on 4.9-T anyway (so the bpf program would fail to load,
714 // and thus needs to be tagged as 4.14+ either way), so there's nothing useful you
715 // could do with a DEVMAP anyway (that isn't already provided by an ARRAY)...
716 // Hence using an ARRAY instead of a DEVMAP simply makes life easier for userspace.
717 type = BPF_MAP_TYPE_ARRAY;
718 }
719 if (type == BPF_MAP_TYPE_DEVMAP_HASH && !isAtLeastKernelVersion(5, 4, 0)) {
720 // On Linux Kernels older than 5.4 this map type doesn't exist, but it can kind
721 // of be approximated: HASH has the same userspace visible api.
722 // However it cannot be used by ebpf programs in the same way.
723 // Since bpf_redirect_map() only requires 4.14, a program using a DEVMAP_HASH map
724 // would fail to load (due to trying to redirect to a HASH instead of DEVMAP_HASH).
725 // One must thus tag any BPF_MAP_TYPE_DEVMAP_HASH + bpf_redirect_map() using
726 // programs as being 5.4+...
727 type = BPF_MAP_TYPE_HASH;
728 }
729
730 // The .h file enforces that this is a power of two, and page size will
731 // also always be a power of two, so this logic is actually enough to
732 // force it to be a multiple of the page size, as required by the kernel.
733 unsigned int max_entries = md[i].max_entries;
734 if (type == BPF_MAP_TYPE_RINGBUF) {
735 if (max_entries < page_size) max_entries = page_size;
736 }
737
738 domain selinux_context = getDomainFromSelinuxContext(md[i].selinux_context);
739 if (specified(selinux_context)) {
740 ALOGI("map %s selinux_context [%-32s] -> %d -> '%s' (%s)", mapNames[i].c_str(),
741 md[i].selinux_context, static_cast<int>(selinux_context),
742 lookupSelinuxContext(selinux_context), lookupPinSubdir(selinux_context));
743 }
744
745 domain pin_subdir = getDomainFromPinSubdir(md[i].pin_subdir);
746 if (unrecognized(pin_subdir)) return -ENOTDIR;
747 if (specified(pin_subdir)) {
748 ALOGI("map %s pin_subdir [%-32s] -> %d -> '%s'", mapNames[i].c_str(), md[i].pin_subdir,
749 static_cast<int>(pin_subdir), lookupPinSubdir(pin_subdir));
750 }
751
752 // Format of pin location is /sys/fs/bpf/<pin_subdir|prefix>map_<objName>_<mapName>
753 // except that maps shared across .o's have empty <objName>
754 // Note: <objName> refers to the extension-less basename of the .o file (without @ suffix).
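// e.g. (purely illustrative): a non-shared map "example_map" from example.o, with
// pin_subdir unspecified and an empty prefix, pins at /sys/fs/bpf/map_example_example_map,
// while pin_subdir netd_shared would pin it at /sys/fs/bpf/netd_shared/map_example_example_map.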
755 string mapPinLoc = string(BPF_FS_PATH) + lookupPinSubdir(pin_subdir, prefix) + "map_" +
756 (md[i].shared ? "" : objName) + "_" + mapNames[i];
757 bool reuse = false;
758 unique_fd fd;
759 int saved_errno;
760
761 if (access(mapPinLoc.c_str(), F_OK) == 0) {
762 fd.reset(mapRetrieveRO(mapPinLoc.c_str()));
763 saved_errno = errno;
764 ALOGD("bpf_create_map reusing map %s, ret: %d", mapNames[i].c_str(), fd.get());
765 reuse = true;
766 } else {
767 union bpf_attr req = {
768 .map_type = type,
769 .key_size = md[i].key_size,
770 .value_size = md[i].value_size,
771 .max_entries = max_entries,
772 .map_flags = md[i].map_flags,
773 };
774 if (isAtLeastKernelVersion(4, 15, 0))
775 strlcpy(req.map_name, mapNames[i].c_str(), sizeof(req.map_name));
776 fd.reset(bpf(BPF_MAP_CREATE, req));
777 saved_errno = errno;
778 ALOGD("bpf_create_map name %s, ret: %d", mapNames[i].c_str(), fd.get());
779 }
780
781 if (!fd.ok()) return -saved_errno;
782
783 // When reusing a pinned map, we need to check the map type/sizes/etc match, but for
784 // safety (since reuse code path is rare) run these checks even if we just created it.
785 // We assume failure is due to pinned map mismatch, hence the 'NOT UNIQUE' return code.
786 if (!mapMatchesExpectations(fd, mapNames[i], md[i], type)) return -ENOTUNIQ;
787
788 if (!reuse) {
789 if (specified(selinux_context)) {
790 string createLoc = string(BPF_FS_PATH) + lookupPinSubdir(selinux_context) +
791 "tmp_map_" + objName + "_" + mapNames[i];
792 ret = bpfFdPin(fd, createLoc.c_str());
793 if (ret) {
794 int err = errno;
795 ALOGE("create %s -> %d [%d:%s]", createLoc.c_str(), ret, err, strerror(err));
796 return -err;
797 }
798 ret = renameat2(AT_FDCWD, createLoc.c_str(),
799 AT_FDCWD, mapPinLoc.c_str(), RENAME_NOREPLACE);
800 if (ret) {
801 int err = errno;
802 ALOGE("rename %s %s -> %d [%d:%s]", createLoc.c_str(), mapPinLoc.c_str(), ret,
803 err, strerror(err));
804 return -err;
805 }
806 } else {
807 ret = bpfFdPin(fd, mapPinLoc.c_str());
808 if (ret) {
809 int err = errno;
810 ALOGE("pin %s -> %d [%d:%s]", mapPinLoc.c_str(), ret, err, strerror(err));
811 return -err;
812 }
813 }
814 ret = chmod(mapPinLoc.c_str(), md[i].mode);
815 if (ret) {
816 int err = errno;
817 ALOGE("chmod(%s, 0%o) = %d [%d:%s]", mapPinLoc.c_str(), md[i].mode, ret, err,
818 strerror(err));
819 return -err;
820 }
821 ret = chown(mapPinLoc.c_str(), (uid_t)md[i].uid, (gid_t)md[i].gid);
822 if (ret) {
823 int err = errno;
824 ALOGE("chown(%s, %u, %u) = %d [%d:%s]", mapPinLoc.c_str(), md[i].uid, md[i].gid,
825 ret, err, strerror(err));
826 return -err;
827 }
828 }
829
830 int mapId = bpfGetFdMapId(fd);
831 if (mapId == -1) {
832 ALOGE("bpfGetFdMapId failed, ret: %d [%d]", mapId, errno);
833 } else {
834 ALOGI("map %s id %d", mapPinLoc.c_str(), mapId);
835 }
836
837 mapFds.push_back(std::move(fd));
838 }
839
840 return ret;
841 }
842
843 /* For debugging, dump all instructions */
844 static void dumpIns(char* ins, int size) {
845 for (int row = 0; row < size / 8; row++) {
846 ALOGE("%d: ", row);
847 for (int j = 0; j < 8; j++) {
848 ALOGE("%3x ", ins[(row * 8) + j]);
849 }
850 ALOGE("\n");
851 }
852 }
853
854 /* For debugging, dump all code sections from cs list */
855 static void dumpAllCs(vector<codeSection>& cs) {
856 for (int i = 0; i < (int)cs.size(); i++) {
857 ALOGE("Dumping cs %d, name %s", int(i), cs[i].name.c_str());
858 dumpIns((char*)cs[i].data.data(), cs[i].data.size());
859 ALOGE("-----------");
860 }
861 }
862
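/*
 * Illustrative effect of applyRelo() below (values hypothetical): a relocation at byte
 * offset 16 with map fd 5 targets instruction index 2 (== 16 / 8), which must be the
 * first half of an ld_imm64 (BPF_LD | BPF_IMM | BPF_DW); its src_reg is set to
 * BPF_PSEUDO_MAP_FD and its imm to 5, which the kernel verifier later resolves into a
 * pointer to the map.
 */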
863 static void applyRelo(void* insnsPtr, Elf64_Addr offset, int fd) {
864 int insnIndex;
865 struct bpf_insn *insn, *insns;
866
867 insns = (struct bpf_insn*)(insnsPtr);
868
869 insnIndex = offset / sizeof(struct bpf_insn);
870 insn = &insns[insnIndex];
871
872 // Occasionally might be useful for relocation debugging, but pretty spammy
873 if (0) {
874 ALOGV("applying relo to instruction at byte offset: %llu, "
875 "insn offset %d, insn %llx",
876 (unsigned long long)offset, insnIndex, *(unsigned long long*)insn);
877 }
878
879 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
880 ALOGE("Dumping all instructions till ins %d", insnIndex);
881 ALOGE("invalid relo for insn %d: code 0x%x", insnIndex, insn->code);
882 dumpIns((char*)insnsPtr, (insnIndex + 3) * 8);
883 return;
884 }
885
886 insn->imm = fd;
887 insn->src_reg = BPF_PSEUDO_MAP_FD;
888 }
889
890 static void applyMapRelo(ifstream& elfFile, vector<unique_fd> &mapFds, vector<codeSection>& cs) {
891 vector<string> mapNames;
892
893 int ret = getSectionSymNames(elfFile, "maps", mapNames);
894 if (ret) return;
895
896 for (int k = 0; k != (int)cs.size(); k++) {
897 Elf64_Rel* rel = (Elf64_Rel*)(cs[k].rel_data.data());
898 int n_rel = cs[k].rel_data.size() / sizeof(*rel);
899
900 for (int i = 0; i < n_rel; i++) {
901 int symIndex = ELF64_R_SYM(rel[i].r_info);
902 string symName;
903
904 ret = getSymNameByIdx(elfFile, symIndex, symName);
905 if (ret) return;
906
907 /* Find the map fd and apply relo */
908 for (int j = 0; j < (int)mapNames.size(); j++) {
909 if (!mapNames[j].compare(symName)) {
910 applyRelo(cs[k].data.data(), rel[i].r_offset, mapFds[j]);
911 break;
912 }
913 }
914 }
915 }
916 }
917
918 static int loadCodeSections(const char* elfPath, vector<codeSection>& cs, const string& license,
919 const char* prefix, const unsigned int bpfloader_ver) {
920 unsigned kvers = kernelVersion();
921
922 if (!kvers) {
923 ALOGE("unable to get kernel version");
924 return -EINVAL;
925 }
926
927 string objName = pathToObjName(string(elfPath));
928
929 for (int i = 0; i < (int)cs.size(); i++) {
930 unique_fd& fd = cs[i].prog_fd;
931 int ret;
932 string name = cs[i].name;
933
934 if (!cs[i].prog_def.has_value()) {
935 ALOGE("[%d] '%s' missing program definition! bad bpf.o build?", i, name.c_str());
936 return -EINVAL;
937 }
938
939 unsigned min_kver = cs[i].prog_def->min_kver;
940 unsigned max_kver = cs[i].prog_def->max_kver;
941 ALOGD("cs[%d].name:%s min_kver:%x .max_kver:%x (kvers:%x)", i, name.c_str(), min_kver,
942 max_kver, kvers);
943 if (kvers < min_kver) continue;
944 if (kvers >= max_kver) continue;
945
946 unsigned bpfMinVer = cs[i].prog_def->bpfloader_min_ver;
947 unsigned bpfMaxVer = cs[i].prog_def->bpfloader_max_ver;
948 domain selinux_context = getDomainFromSelinuxContext(cs[i].prog_def->selinux_context);
949 domain pin_subdir = getDomainFromPinSubdir(cs[i].prog_def->pin_subdir);
950 // Note: make sure to only check for unrecognized *after* verifying bpfloader
951 // version limits include this bpfloader's version.
952
953 ALOGD("cs[%d].name:%s requires bpfloader version [0x%05x,0x%05x)", i, name.c_str(),
954 bpfMinVer, bpfMaxVer);
955 if (bpfloader_ver < bpfMinVer) continue;
956 if (bpfloader_ver >= bpfMaxVer) continue;
957
958 if ((cs[i].prog_def->ignore_on_eng && isEng()) ||
959 (cs[i].prog_def->ignore_on_user && isUser()) ||
960 (cs[i].prog_def->ignore_on_userdebug && isUserdebug())) {
961 ALOGD("cs[%d].name:%s is ignored on %s builds", i, name.c_str(),
962 getBuildType().c_str());
963 continue;
964 }
965
966 if ((isArm() && isKernel32Bit() && cs[i].prog_def->ignore_on_arm32) ||
967 (isArm() && isKernel64Bit() && cs[i].prog_def->ignore_on_aarch64) ||
968 (isX86() && isKernel32Bit() && cs[i].prog_def->ignore_on_x86_32) ||
969 (isX86() && isKernel64Bit() && cs[i].prog_def->ignore_on_x86_64) ||
970 (isRiscV() && cs[i].prog_def->ignore_on_riscv64)) {
971 ALOGD("cs[%d].name:%s is ignored on %s", i, name.c_str(), describeArch());
972 continue;
973 }
974
975 if (unrecognized(pin_subdir)) return -ENOTDIR;
976
977 if (specified(selinux_context)) {
978 ALOGI("prog %s selinux_context [%-32s] -> %d -> '%s' (%s)", name.c_str(),
979 cs[i].prog_def->selinux_context, static_cast<int>(selinux_context),
980 lookupSelinuxContext(selinux_context), lookupPinSubdir(selinux_context));
981 }
982
983 if (specified(pin_subdir)) {
984 ALOGI("prog %s pin_subdir [%-32s] -> %d -> '%s'", name.c_str(),
985 cs[i].prog_def->pin_subdir, static_cast<int>(pin_subdir),
986 lookupPinSubdir(pin_subdir));
987 }
988
989 // strip any potential $foo suffix
990 // this can be used to provide duplicate programs
991 // conditionally loaded based on running kernel version
992 name = name.substr(0, name.find_last_of('$'));
993
994 bool reuse = false;
995 // Format of pin location is
996 // /sys/fs/bpf/<pin_subdir|prefix>prog_<objName>_<progName>
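// e.g. (purely illustrative): a program "schedcls_example" from example.o, with
// pin_subdir unspecified and an empty prefix, pins at /sys/fs/bpf/prog_example_schedcls_example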
997 string progPinLoc = string(BPF_FS_PATH) + lookupPinSubdir(pin_subdir, prefix) + "prog_" +
998 objName + '_' + string(name);
999 if (access(progPinLoc.c_str(), F_OK) == 0) {
1000 fd.reset(retrieveProgram(progPinLoc.c_str()));
1001 ALOGD("New bpf prog load reusing prog %s, ret: %d (%s)", progPinLoc.c_str(), fd.get(),
1002 (!fd.ok() ? std::strerror(errno) : "no error"));
1003 reuse = true;
1004 } else {
1005 vector<char> log_buf(BPF_LOAD_LOG_SZ, 0);
1006
1007 union bpf_attr req = {
1008 .prog_type = cs[i].type,
1009 .kern_version = kvers,
1010 .license = ptr_to_u64(license.c_str()),
1011 .insns = ptr_to_u64(cs[i].data.data()),
1012 .insn_cnt = static_cast<__u32>(cs[i].data.size() / sizeof(struct bpf_insn)),
1013 .log_level = 1,
1014 .log_buf = ptr_to_u64(log_buf.data()),
1015 .log_size = static_cast<__u32>(log_buf.size()),
1016 .expected_attach_type = cs[i].expected_attach_type,
1017 };
1018 if (isAtLeastKernelVersion(4, 15, 0))
1019 strlcpy(req.prog_name, cs[i].name.c_str(), sizeof(req.prog_name));
1020 fd.reset(bpf(BPF_PROG_LOAD, req));
1021
1022 ALOGD("BPF_PROG_LOAD call for %s (%s) returned fd: %d (%s)", elfPath,
1023 cs[i].name.c_str(), fd.get(), (!fd.ok() ? std::strerror(errno) : "no error"));
1024
1025 if (!fd.ok()) {
1026 vector<string> lines = android::base::Split(log_buf.data(), "\n");
1027
1028 ALOGW("BPF_PROG_LOAD - BEGIN log_buf contents:");
1029 for (const auto& line : lines) ALOGW("%s", line.c_str());
1030 ALOGW("BPF_PROG_LOAD - END log_buf contents.");
1031
1032 if (cs[i].prog_def->optional) {
1033 ALOGW("failed program is marked optional - continuing...");
1034 continue;
1035 }
1036 ALOGE("non-optional program failed to load.");
1037 }
1038 }
1039
1040 if (!fd.ok()) return fd.get();
1041
1042 if (!reuse) {
1043 if (specified(selinux_context)) {
1044 string createLoc = string(BPF_FS_PATH) + lookupPinSubdir(selinux_context) +
1045 "tmp_prog_" + objName + '_' + string(name);
1046 ret = bpfFdPin(fd, createLoc.c_str());
1047 if (ret) {
1048 int err = errno;
1049 ALOGE("create %s -> %d [%d:%s]", createLoc.c_str(), ret, err, strerror(err));
1050 return -err;
1051 }
1052 ret = renameat2(AT_FDCWD, createLoc.c_str(),
1053 AT_FDCWD, progPinLoc.c_str(), RENAME_NOREPLACE);
1054 if (ret) {
1055 int err = errno;
1056 ALOGE("rename %s %s -> %d [%d:%s]", createLoc.c_str(), progPinLoc.c_str(), ret,
1057 err, strerror(err));
1058 return -err;
1059 }
1060 } else {
1061 ret = bpfFdPin(fd, progPinLoc.c_str());
1062 if (ret) {
1063 int err = errno;
1064 ALOGE("create %s -> %d [%d:%s]", progPinLoc.c_str(), ret, err, strerror(err));
1065 return -err;
1066 }
1067 }
1068 if (chmod(progPinLoc.c_str(), 0440)) {
1069 int err = errno;
1070 ALOGE("chmod %s 0440 -> [%d:%s]", progPinLoc.c_str(), err, strerror(err));
1071 return -err;
1072 }
1073 if (chown(progPinLoc.c_str(), (uid_t)cs[i].prog_def->uid,
1074 (gid_t)cs[i].prog_def->gid)) {
1075 int err = errno;
1076 ALOGE("chown %s %d %d -> [%d:%s]", progPinLoc.c_str(), cs[i].prog_def->uid,
1077 cs[i].prog_def->gid, err, strerror(err));
1078 return -err;
1079 }
1080 }
1081
1082 int progId = bpfGetFdProgId(fd);
1083 if (progId == -1) {
1084 ALOGE("bpfGetFdProgId failed, ret: %d [%d]", progId, errno);
1085 } else {
1086 ALOGI("prog %s id %d", progPinLoc.c_str(), progId);
1087 }
1088 }
1089
1090 return 0;
1091 }
1092
1093 int loadProg(const char* const elfPath, bool* const isCritical, const unsigned int bpfloader_ver,
1094 const Location& location) {
1095 vector<char> license;
1096 vector<char> critical;
1097 vector<codeSection> cs;
1098 vector<unique_fd> mapFds;
1099 int ret;
1100
1101 if (!isCritical) return -1;
1102 *isCritical = false;
1103
1104 ifstream elfFile(elfPath, ios::in | ios::binary);
1105 if (!elfFile.is_open()) return -1;
1106
1107 ret = readSectionByName("critical", elfFile, critical);
1108 *isCritical = !ret;
1109
1110 ret = readSectionByName("license", elfFile, license);
1111 if (ret) {
1112 ALOGE("Couldn't find license in %s", elfPath);
1113 return ret;
1114 } else {
1115 ALOGD("Loading %s%s ELF object %s with license %s",
1116 *isCritical ? "critical for " : "optional", *isCritical ? (char*)critical.data() : "",
1117 elfPath, (char*)license.data());
1118 }
1119
1120 // the following default values are for bpfloader V0.0 format which does not include them
1121 unsigned int bpfLoaderMinVer =
1122 readSectionUint("bpfloader_min_ver", elfFile, DEFAULT_BPFLOADER_MIN_VER);
1123 unsigned int bpfLoaderMaxVer =
1124 readSectionUint("bpfloader_max_ver", elfFile, DEFAULT_BPFLOADER_MAX_VER);
1125 unsigned int bpfLoaderMinRequiredVer =
1126 readSectionUint("bpfloader_min_required_ver", elfFile, 0);
1127 unsigned int netBpfLoadMinVer =
1128 readSectionUint("netbpfload_min_ver", elfFile, 0);
1129 size_t sizeOfBpfMapDef =
1130 readSectionUint("size_of_bpf_map_def", elfFile, DEFAULT_SIZEOF_BPF_MAP_DEF);
1131 size_t sizeOfBpfProgDef =
1132 readSectionUint("size_of_bpf_prog_def", elfFile, DEFAULT_SIZEOF_BPF_PROG_DEF);
1133
1134 // temporary hack to enable gentle enablement of mainline NetBpfLoad
1135 if (bpfloader_ver < netBpfLoadMinVer) {
1136 ALOGI("NetBpfLoad version %d ignoring ELF object %s with netbpfload min ver %d",
1137 bpfloader_ver, elfPath, netBpfLoadMinVer);
1138 return 0;
1139 }
1140
1141 // inclusive lower bound check
1142 if (bpfloader_ver < bpfLoaderMinVer) {
1143 ALOGI("BpfLoader version 0x%05x ignoring ELF object %s with min ver 0x%05x",
1144 bpfloader_ver, elfPath, bpfLoaderMinVer);
1145 return 0;
1146 }
1147
1148 // exclusive upper bound check
1149 if (bpfloader_ver >= bpfLoaderMaxVer) {
1150 ALOGI("BpfLoader version 0x%05x ignoring ELF object %s with max ver 0x%05x",
1151 bpfloader_ver, elfPath, bpfLoaderMaxVer);
1152 return 0;
1153 }
1154
1155 if (bpfloader_ver < bpfLoaderMinRequiredVer) {
1156 ALOGI("BpfLoader version 0x%05x failing due to ELF object %s with required min ver 0x%05x",
1157 bpfloader_ver, elfPath, bpfLoaderMinRequiredVer);
1158 return -1;
1159 }
1160
1161 ALOGI("BpfLoader version 0x%05x processing ELF object %s with ver [0x%05x,0x%05x)",
1162 bpfloader_ver, elfPath, bpfLoaderMinVer, bpfLoaderMaxVer);
1163
1164 if (sizeOfBpfMapDef < DEFAULT_SIZEOF_BPF_MAP_DEF) {
1165 ALOGE("sizeof(bpf_map_def) of %zu is too small (< %d)", sizeOfBpfMapDef,
1166 DEFAULT_SIZEOF_BPF_MAP_DEF);
1167 return -1;
1168 }
1169
1170 if (sizeOfBpfProgDef < DEFAULT_SIZEOF_BPF_PROG_DEF) {
1171 ALOGE("sizeof(bpf_prog_def) of %zu is too small (< %d)", sizeOfBpfProgDef,
1172 DEFAULT_SIZEOF_BPF_PROG_DEF);
1173 return -1;
1174 }
1175
1176 ret = readCodeSections(elfFile, cs, sizeOfBpfProgDef);
1177 if (ret) {
1178 ALOGE("Couldn't read all code sections in %s", elfPath);
1179 return ret;
1180 }
1181
1182 /* Just for future debugging */
1183 if (0) dumpAllCs(cs);
1184
1185 ret = createMaps(elfPath, elfFile, mapFds, location.prefix, sizeOfBpfMapDef, bpfloader_ver);
1186 if (ret) {
1187 ALOGE("Failed to create maps: (ret=%d) in %s", ret, elfPath);
1188 return ret;
1189 }
1190
1191 for (int i = 0; i < (int)mapFds.size(); i++)
1192 ALOGV("map_fd found at %d is %d in %s", i, mapFds[i].get(), elfPath);
1193
1194 applyMapRelo(elfFile, mapFds, cs);
1195
1196 ret = loadCodeSections(elfPath, cs, string(license.data()), location.prefix, bpfloader_ver);
1197 if (ret) ALOGE("Failed to load programs, loadCodeSections ret=%d", ret);
1198
1199 return ret;
1200 }
1201
1202 } // namespace bpf
1203 } // namespace android
1204