/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// The resulting .o needs to load on Android T+
#define BPFLOADER_MIN_VER BPFLOADER_T_VERSION

#include <bpf_helpers.h>
#include <linux/bpf.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <stdbool.h>
#include <stdint.h>
#include "bpf_net_helpers.h"
#include "netd.h"

// These values are used by the cgroup bpf filters only.
static const int DROP = 0;
static const int PASS = 1;
static const int DROP_UNLESS_DNS = 2;  // internal to our program

// These values are used by the xt_bpf programs only.
static const int BPF_NOMATCH = 0;
static const int BPF_MATCH = 1;

// Used for the 'bool enable_tracing' argument.
static const bool TRACE_ON = true;
static const bool TRACE_OFF = false;

// offsetof(struct iphdr, ihl) -- but that's a bitfield
#define IPPROTO_IHL_OFF 0
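// (Version and IHL share the first byte of the IPv4 header: version in the
//  high nibble, IHL in the low nibble, so e.g. 0x45 means IPv4 with a
//  5-word, ie. 20 byte, header.)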

// This is offsetof(struct tcphdr, "32 bit tcp flag field")
// The tcp flags are after be16 source, dest & be32 seq, ack_seq, hence 12 bytes in.
//
// Note that TCP_FLAG_{ACK,PSH,RST,SYN,FIN} are htonl(0x00{10,08,04,02,01}0000)
// see include/uapi/linux/tcp.h
#define TCP_FLAG32_OFF 12

#define TCP_FLAG8_OFF (TCP_FLAG32_OFF + 1)
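// The 32-bit load at TCP_FLAG32_OFF covers tcp header bytes 12..15 (data
// offset / reserved / flags / window), and the eight flag bits
// (CWR/ECE/URG/ACK/PSH/RST/SYN/FIN) live in byte 13, which is what
// TCP_FLAG8_OFF points at.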

// For maps netd does not need to access
#define DEFINE_BPF_MAP_NO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries,         \
                       AID_ROOT, AID_NET_BW_ACCT, 0060, "fs_bpf_net_shared", "",   \
                       PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,              \
                       LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

// For maps netd only needs read-only access to
#define DEFINE_BPF_MAP_RO_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries)  \
    DEFINE_BPF_MAP_EXT(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries,          \
                       AID_ROOT, AID_NET_BW_ACCT, 0460, "fs_bpf_netd_readonly", "", \
                       PRIVATE, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER,               \
                       LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

// For maps netd needs to be able to read and write
#define DEFINE_BPF_MAP_RW_NETD(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, \
                       AID_ROOT, AID_NET_BW_ACCT, 0660)

// Bpf map arrays are preinitialized to 0 on creation and do not support deletion of a key;
// see: kernel/bpf/arraymap.c array_map_delete_elem() returns -EINVAL (from both syscall and ebpf).
// Additionally, on newer kernels the bpf jit can optimize out the lookups.
// The only valid indexes are [0..CONFIGURATION_MAP_SIZE-1].
DEFINE_BPF_MAP_RO_NETD(configuration_map, ARRAY, uint32_t, uint32_t, CONFIGURATION_MAP_SIZE)

// TODO: consider whether we can merge some of these maps
// for example it might be possible to merge 2 or 3 of:
//   uid_counterset_map + uid_owner_map + uid_permission_map
DEFINE_BPF_MAP_RW_NETD(cookie_tag_map, HASH, uint64_t, UidTagValue, COOKIE_UID_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(uid_counterset_map, HASH, uint32_t, uint8_t, UID_COUNTERSET_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(app_uid_stats_map, HASH, uint32_t, StatsValue, APP_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_A, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(stats_map_B, HASH, StatsKey, StatsValue, STATS_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(iface_stats_map, HASH, uint32_t, StatsValue, IFACE_STATS_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(uid_owner_map, HASH, uint32_t, UidOwnerValue, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_RO_NETD(uid_permission_map, HASH, uint32_t, uint8_t, UID_OWNER_MAP_SIZE)
DEFINE_BPF_MAP_NO_NETD(ingress_discard_map, HASH, IngressDiscardKey, IngressDiscardValue,
                       INGRESS_DISCARD_MAP_SIZE)

DEFINE_BPF_MAP_RW_NETD(lock_array_test_map, ARRAY, uint32_t, bool, 1)
DEFINE_BPF_MAP_RW_NETD(lock_hash_test_map, HASH, uint32_t, bool, 1)

/* never actually used from ebpf */
DEFINE_BPF_MAP_NO_NETD(iface_index_name_map, HASH, uint32_t, IfaceValue, IFACE_INDEX_NAME_MAP_SIZE)

// A single-element configuration array; packet tracing is enabled when 'true'.
DEFINE_BPF_MAP_EXT(packet_trace_enabled_map, ARRAY, uint32_t, bool, 1,
                   AID_ROOT, AID_SYSTEM, 0060, "fs_bpf_net_shared", "", PRIVATE,
                   BPFLOADER_U_VERSION, BPFLOADER_MAX_VER, LOAD_ON_ENG,
                   LOAD_ON_USER, LOAD_ON_USERDEBUG)

// A ring buffer on which packet information is pushed.
DEFINE_BPF_RINGBUF_EXT(packet_trace_ringbuf, PacketTrace, PACKET_TRACE_BUF_SIZE,
                       AID_ROOT, AID_SYSTEM, 0060, "fs_bpf_net_shared", "", PRIVATE,
                       BPFLOADER_U_VERSION, BPFLOADER_MAX_VER, LOAD_ON_ENG,
                       LOAD_ON_USER, LOAD_ON_USERDEBUG);

DEFINE_BPF_MAP_RO_NETD(data_saver_enabled_map, ARRAY, uint32_t, bool,
                       DATA_SAVER_ENABLED_MAP_SIZE)

// iptables xt_bpf programs need to be usable by both the netd and netutils_wrappers
// selinux contexts. This is because even non-xt_bpf iptables mutations are implemented as
// a full table dump, followed by an update in userspace, and then a reload into the kernel.
// During that reload any already in-use xt_bpf matchers are serialized as the path to the
// pinned program (see XT_BPF_MODE_PATH_PINNED), and the iptables binary (or rather the
// kernel acting on behalf of it) must be able to retrieve the pinned program for the
// reload to succeed.
#define DEFINE_XTBPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog)

// programs that need to be usable by netd, but not by netutils_wrappers
// (this is because these are currently attached by the mainline provided libnetd_updatable .so
// which is loaded into netd and thus runs as netd uid/gid/selinux context)
#define DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, minKV, maxKV) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog,                               \
                        minKV, maxKV, BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, MANDATORY,            \
                        "fs_bpf_netd_readonly", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

#define DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv) \
    DEFINE_NETD_BPF_PROG_KVER_RANGE(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, KVER_INF)

#define DEFINE_NETD_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_NETD_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE)

#define DEFINE_NETD_V_BPF_PROG_KVER(SECTION_NAME, prog_uid, prog_gid, the_prog, minKV)            \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, minKV,                        \
                        KVER_INF, BPFLOADER_MAINLINE_V_VERSION, BPFLOADER_MAX_VER, MANDATORY,     \
                        "fs_bpf_netd_readonly", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

// programs that only need to be usable by the system server
#define DEFINE_SYS_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF,  \
                        BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, MANDATORY, \
                        "fs_bpf_net_shared", "", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)

/*
 * Note: this blindly assumes an MTU of 1500, and that packets > MTU are always TCP,
 * and that TCP is using the Linux default settings with TCP timestamp option enabled
 * which uses 12 TCP option bytes per frame.
 *
 * These are not unreasonable assumptions:
 *
 * The internet does not really support MTUs greater than 1500, so most TCP traffic will
 * be at that MTU, or slightly below it (worst case our upwards adjustment is too small).
 *
 * The chance our traffic isn't IP at all is basically zero, so the IP overhead correction
 * is bound to be needed.
 *
 * Furthermore, the likelihood that we're having to deal with GSO (ie. > MTU) packets that
 * are not IP/TCP is pretty small (few other things are supported by Linux) and worst case
 * our extra overhead will be slightly off, but probably still better than assuming none.
 *
 * Most servers are also Linux and thus support/default to using TCP timestamp option
 * (and indeed the TCP timestamp option comes from RFC 1323 titled "TCP Extensions for High
 * Performance", which also defined TCP window scaling; both are thus absolutely ancient...).
 *
 * Altogether this should be more correct than if we simply ignored GSO frames
 * (ie. counted them as single packets with no extra overhead).
 *
 * This matters especially because the number of packets feeds into any future clat offload
 * correction (which adjusts upward by 20 bytes per packet to account for the ipv4 -> ipv6
 * header conversion).
 */
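// A worked example with illustrative numbers: a 14532 byte IPv4 TCP GSO skb has
// tcp_overhead = 20 (iphdr) + 20 (tcphdr) + 12 (options) = 52 bytes, so
// mss = 1500 - 52 = 1448 and payload = 14532 - 52 = 14480 bytes.  That is
// exactly 10 segments, so we count packets = 10 and
// bytes = 52 * 10 + 14480 = 15000, ie. ten full 1500 byte frames on the wire.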
#define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey)                                            \
    static __always_inline inline void update_##the_stats_map(const struct __sk_buff* const skb, \
                                                              const TypeOfKey* const key,        \
                                                              const struct egress_bool egress,   \
                                                              const struct kver_uint kver) {     \
        StatsValue* value = bpf_##the_stats_map##_lookup_elem(key);                              \
        if (!value) {                                                                            \
            StatsValue newValue = {};                                                            \
            bpf_##the_stats_map##_update_elem(key, &newValue, BPF_NOEXIST);                      \
            value = bpf_##the_stats_map##_lookup_elem(key);                                      \
        }                                                                                        \
        if (value) {                                                                             \
            const int mtu = 1500;                                                                \
            uint64_t packets = 1;                                                                \
            uint64_t bytes = skb->len;                                                           \
            if (bytes > mtu) {                                                                   \
                bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6));                             \
                int ip_overhead = (is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr));     \
                int tcp_overhead = ip_overhead + sizeof(struct tcphdr) + 12;                     \
                int mss = mtu - tcp_overhead;                                                    \
                uint64_t payload = bytes - tcp_overhead;                                         \
                packets = (payload + mss - 1) / mss;                                             \
                bytes = tcp_overhead * packets + payload;                                        \
            }                                                                                    \
            if (egress.egress) {                                                                 \
                __sync_fetch_and_add(&value->txPackets, packets);                                \
                __sync_fetch_and_add(&value->txBytes, bytes);                                    \
            } else {                                                                             \
                __sync_fetch_and_add(&value->rxPackets, packets);                                \
                __sync_fetch_and_add(&value->rxBytes, bytes);                                    \
            }                                                                                    \
        }                                                                                        \
    }

DEFINE_UPDATE_STATS(app_uid_stats_map, uint32_t)
DEFINE_UPDATE_STATS(iface_stats_map, uint32_t)
DEFINE_UPDATE_STATS(stats_map_A, StatsKey)
DEFINE_UPDATE_STATS(stats_map_B, StatsKey)

// both of these return 0 on success or -EFAULT on failure (and zero out the buffer)
static __always_inline inline int bpf_skb_load_bytes_net(const struct __sk_buff* const skb,
                                                         const int L3_off,
                                                         void* const to,
                                                         const int len,
                                                         const struct kver_uint kver) {
    // 'kver' (here and throughout) is the compile time guaranteed minimum kernel version,
    // ie. we're building (a version of) the bpf program for kver (or newer!) kernels.
    //
    // 4.19+ kernels support the 'bpf_skb_load_bytes_relative()' bpf helper function,
    // so we can use it.  On pre-4.19 kernels we cannot use the relative load helper,
    // and thus will simply get things wrong if there's any L2 (ethernet) header in the skb.
    //
    // Luckily, for cellular traffic, there likely isn't any, as cell is usually 'rawip'.
    //
    // However, this does mean that wifi (and ethernet) on 4.14 is basically a lost cause:
    // we'll be making decisions based on the *wrong* bytes (fetched from the wrong offset),
    // because the 'L3_off' passed to bpf_skb_load_bytes() should be increased by l2_header_size,
    // which for ethernet is 14 and not 0 like it is for rawip.
    //
    // For similar reasons this will fail with non-offloaded VLAN tags on < 4.19 kernels,
    // since those extend the ethernet header from 14 to 18 bytes.
    return KVER_IS_AT_LEAST(kver, 4, 19, 0)
        ? bpf_skb_load_bytes_relative(skb, L3_off, to, len, BPF_HDR_START_NET)
        : bpf_skb_load_bytes(skb, L3_off, to, len);
}
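
// For example, the callers below fetch the IPv4 protocol field (at L3 offset 9)
// like so, ignoring the return value since the destination is pre-zeroed on failure:
//   uint8_t proto = 0;
//   (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver);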

static __always_inline inline void do_packet_tracing(
        const struct __sk_buff* const skb, const struct egress_bool egress, const uint32_t uid,
        const uint32_t tag, const bool enable_tracing, const struct kver_uint kver) {
    if (!enable_tracing) return;
    if (!KVER_IS_AT_LEAST(kver, 5, 8, 0)) return;

    uint32_t mapKey = 0;
    bool* traceConfig = bpf_packet_trace_enabled_map_lookup_elem(&mapKey);
    if (traceConfig == NULL) return;
    if (*traceConfig == false) return;

    PacketTrace* pkt = bpf_packet_trace_ringbuf_reserve();
    if (pkt == NULL) return;

    // Errors from bpf_skb_load_bytes_net are ignored to favor returning something
    // over returning nothing. In the event of an error, the kernel will fill in
    // zero for the destination memory. Do not change the default '= 0' below.

    uint8_t proto = 0;
    uint8_t L4_off = 0;
    uint8_t ipVersion = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver);
        (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &L4_off, sizeof(L4_off), kver);
        L4_off = (L4_off & 0x0F) * 4;  // IHL is in 32-bit words; convert to a byte offset.
        ipVersion = 4;
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &proto, sizeof(proto), kver);
        L4_off = sizeof(struct ipv6hdr);
        ipVersion = 6;
        // skip over a *single* HOPOPTS or DSTOPTS extension header (if present)
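        // (The extension header's 'len' field counts 8-octet units *beyond* the
        //  first 8 octets -- see RFC 8200 -- hence the '(len + 1) * 8' below.)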
        if (proto == IPPROTO_HOPOPTS || proto == IPPROTO_DSTOPTS) {
            struct {
                uint8_t proto, len;
            } ext_hdr;
            if (!bpf_skb_load_bytes_net(skb, L4_off, &ext_hdr, sizeof(ext_hdr), kver)) {
                proto = ext_hdr.proto;
                L4_off += (ext_hdr.len + 1) * 8;
            }
        }
    }

    uint8_t flags = 0;
    __be16 sport = 0, dport = 0;
    if (L4_off >= 20) {
      switch (proto) {
        case IPPROTO_TCP:
          (void)bpf_skb_load_bytes_net(skb, L4_off + TCP_FLAG8_OFF, &flags, sizeof(flags), kver);
          // fallthrough
        case IPPROTO_DCCP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
        case IPPROTO_SCTP:
          // all of these L4 protocols start with be16 src & dst port
          (void)bpf_skb_load_bytes_net(skb, L4_off + 0, &sport, sizeof(sport), kver);
          (void)bpf_skb_load_bytes_net(skb, L4_off + 2, &dport, sizeof(dport), kver);
          break;
        case IPPROTO_ICMP:
        case IPPROTO_ICMPV6:
          // Both IPv4 and IPv6 icmp start with u8 type & code, which we store in the bottom
          // (ie. second) byte of sport/dport (which are be16s), the top byte is already zero.
          (void)bpf_skb_load_bytes_net(skb, L4_off + 0, (char *)&sport + 1, 1, kver); // type
          (void)bpf_skb_load_bytes_net(skb, L4_off + 1, (char *)&dport + 1, 1, kver); // code
          break;
      }
    }

    pkt->timestampNs = bpf_ktime_get_boot_ns();
    pkt->ifindex = skb->ifindex;
    pkt->length = skb->len;

    pkt->uid = uid;
    pkt->tag = tag;
    pkt->sport = sport;
    pkt->dport = dport;

    pkt->egress = egress.egress;
    pkt->wakeup = !egress.egress && (skb->mark & 0x80000000);  // Fwmark.ingress_cpu_wakeup
    pkt->ipProto = proto;
    pkt->tcpFlags = flags;
    pkt->ipVersion = ipVersion;

    bpf_packet_trace_ringbuf_submit(pkt);
}

static __always_inline inline bool skip_owner_match(struct __sk_buff* skb,
                                                    const struct egress_bool egress,
                                                    const struct kver_uint kver) {
    uint32_t flag = 0;
    if (skb->protocol == htons(ETH_P_IP)) {
        uint8_t proto;
        // no need to check for success, proto will be zeroed if bpf_skb_load_bytes_net() fails
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver);
        if (proto == IPPROTO_ESP) return true;
        if (proto != IPPROTO_TCP) return false;  // handles read failure above
        uint8_t ihl;
        // We don't check for success: this read cannot fail, since it is earlier in the packet
        // than proto, the read of which must have succeeded.  Additionally, were it somehow to
        // fail, the next read (a little deeper in the packet, in spite of ihl being zeroed) of
        // the tcp flags field would also fail, and that failure we already handle correctly.
        // (We also don't check that ihl is in [0x45,0x4F] nor that the ipv4 header checksum
        // is correct.)
        (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &ihl, sizeof(ihl), kver);
        // if the read below fails, we'll just assume no TCP flags are set, which is fine.
        (void)bpf_skb_load_bytes_net(skb, (ihl & 0xF) * 4 + TCP_FLAG32_OFF,
                                     &flag, sizeof(flag), kver);
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        uint8_t proto;
        // no need to check for success, proto will be zeroed if bpf_skb_load_bytes_net() fails
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &proto, sizeof(proto), kver);
        if (proto == IPPROTO_ESP) return true;
        if (proto != IPPROTO_TCP) return false;  // handles read failure above
        // if the read below fails, we'll just assume no TCP flags are set, which is fine.
        (void)bpf_skb_load_bytes_net(skb, sizeof(struct ipv6hdr) + TCP_FLAG32_OFF,
                                     &flag, sizeof(flag), kver);
    } else {
        return false;
    }
    // Always allow RST's, and additionally allow ingress FINs
    return flag & (TCP_FLAG_RST | (egress.egress ? 0 : TCP_FLAG_FIN));  // false on read failure
}

static __always_inline inline BpfConfig getConfig(uint32_t configKey) {
    uint32_t mapSettingKey = configKey;
    BpfConfig* config = bpf_configuration_map_lookup_elem(&mapSettingKey);
    if (!config) {
        // Couldn't read configuration entry. Assume everything is disabled.
        return DEFAULT_CONFIG;
    }
    return *config;
}

static __always_inline inline bool ingress_should_discard(struct __sk_buff* skb,
                                                          const struct kver_uint kver) {
    // Require 4.19, since earlier kernels don't have bpf_skb_load_bytes_relative(), which
    // provides reads relative to the L3 header.  Without it we could fetch the wrong bytes.
    // Additionally, earlier bpf verifiers are much harder to please.
    if (!KVER_IS_AT_LEAST(kver, 4, 19, 0)) return false;

    IngressDiscardKey k = {};
    if (skb->protocol == htons(ETH_P_IP)) {
        k.daddr.s6_addr32[2] = htonl(0xFFFF);
        (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(daddr), &k.daddr.s6_addr32[3], 4, kver);
    } else if (skb->protocol == htons(ETH_P_IPV6)) {
        (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(daddr), &k.daddr, sizeof(k.daddr), kver);
    } else {
        return false; // non IPv4/IPv6, so no IP to match on
    }

    // We don't check for load success: the destination bytes will be zeroed if
    // bpf_skb_load_bytes_net() fails.  Instead we rely on daddr of '::' and '::ffff:0.0.0.0'
    // never being present in the map itself.

    IngressDiscardValue* v = bpf_ingress_discard_map_lookup_elem(&k);
    if (!v) return false;  // lookup failure -> no protection in place -> allow
    // if (skb->ifindex == 1) return false;  // allow 'lo', but can't happen - see callsite
    if (skb->ifindex == v->iif[0]) return false;  // allowed interface
    if (skb->ifindex == v->iif[1]) return false;  // allowed interface
    return true;  // disallowed interface
}

static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid,
                                                  const struct egress_bool egress,
                                                  const struct kver_uint kver) {
    if (is_system_uid(uid)) return PASS;

    if (skip_owner_match(skb, egress, kver)) return PASS;

    BpfConfig enabledRules = getConfig(UID_RULES_CONFIGURATION_KEY);

    // BACKGROUND match does not apply to loopback traffic
    if (skb->ifindex == 1) enabledRules &= ~BACKGROUND_MATCH;

    UidOwnerValue* uidEntry = bpf_uid_owner_map_lookup_elem(&uid);
    uint32_t uidRules = uidEntry ? uidEntry->rule : 0;
    uint32_t allowed_iif = uidEntry ? uidEntry->iif : 0;

    if (isBlockedByUidRules(enabledRules, uidRules)) return DROP;

    if (!egress.egress && skb->ifindex != 1) {
        if (ingress_should_discard(skb, kver)) return DROP;
        if (uidRules & IIF_MATCH) {
            if (allowed_iif && skb->ifindex != allowed_iif) {
                // Drop packets not coming from lo nor the allowed interface.
                // (allowed_iif == 0 is a wildcard and never drops packets.)
                return DROP_UNLESS_DNS;
            }
        } else if (uidRules & LOCKDOWN_VPN_MATCH) {
            // Drop packets not coming from lo when the rule has LOCKDOWN_VPN_MATCH
            // but not IIF_MATCH.
            return DROP_UNLESS_DNS;
        }
    }
    return PASS;
}

static __always_inline inline void update_stats_with_config(const uint32_t selectedMap,
                                                            const struct __sk_buff* const skb,
                                                            const StatsKey* const key,
                                                            const struct egress_bool egress,
                                                            const struct kver_uint kver) {
    if (selectedMap == SELECT_MAP_A) {
        update_stats_map_A(skb, key, egress, kver);
    } else {
        update_stats_map_B(skb, key, egress, kver);
    }
}

static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb,
                                                      const struct egress_bool egress,
                                                      const bool enable_tracing,
                                                      const struct kver_uint kver) {
    // sock_uid will be 'overflowuid' if !sk_fullsock(sk_to_full_sk(skb->sk))
    uint32_t sock_uid = bpf_get_socket_uid(skb);

    // kernel's DEFAULT_OVERFLOWUID is 65534, this is the overflow 'nobody' uid,
    // usually this being returned means that skb->sk is NULL during RX
    // (early decap socket lookup failure), which commonly happens for incoming
    // packets to an unconnected udp socket.
    // But it can also happen for egress from a timewait socket.
    // Let's treat such cases as 'root', which passes is_system_uid().
    if (sock_uid == 65534) sock_uid = 0;

    uint64_t cookie = bpf_get_socket_cookie(skb);  // 0 iff !skb->sk
    UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
    uint32_t uid, tag;
    if (utag) {
        uid = utag->uid;
        tag = utag->tag;
    } else {
        uid = sock_uid;
        tag = 0;
    }

    // Always allow and never count clat traffic. Only the IPv4 traffic on the stacked
    // interface is accounted for and subject to usage restrictions.
    // CLAT IPv6 TX sockets are *always* tagged with CLAT uid, see tagSocketAsClat()
    // CLAT daemon receives via an untagged AF_PACKET socket.
    if (egress.egress && uid == AID_CLAT) return PASS;

    int match = bpf_owner_match(skb, sock_uid, egress, kver);

// Workaround for secureVPN with VpnIsolation enabled, refer to b/159994981 for details.
// Keep TAG_SYSTEM_DNS in sync with DnsResolver/include/netd_resolv/resolv.h
// and TrafficStatsConstants.java
#define TAG_SYSTEM_DNS 0xFFFFFF82
    if (tag == TAG_SYSTEM_DNS && uid == AID_DNS) {
        uid = sock_uid;
        if (match == DROP_UNLESS_DNS) match = PASS;
    } else {
        if (match == DROP_UNLESS_DNS) match = DROP;
    }

    // If an outbound packet is going to be dropped, we do not count that traffic.
    if (egress.egress && (match == DROP)) return DROP;

    StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};

    uint8_t* counterSet = bpf_uid_counterset_map_lookup_elem(&uid);
    if (counterSet) key.counterSet = (uint32_t)*counterSet;

    uint32_t mapSettingKey = CURRENT_STATS_MAP_CONFIGURATION_KEY;
    uint32_t* selectedMap = bpf_configuration_map_lookup_elem(&mapSettingKey);

    if (!selectedMap) return PASS;  // cannot happen, needed to keep bpf verifier happy

    do_packet_tracing(skb, egress, uid, tag, enable_tracing, kver);
    update_stats_with_config(*selectedMap, skb, &key, egress, kver);
    update_app_uid_stats_map(skb, &uid, egress, kver);

    // We've already handled DROP_UNLESS_DNS up above, thus when we reach here the only
    // possible values of match are DROP(0) or PASS(1); however, we need to use
    // "match &= 1" before 'return match' to help the kernel's bpf verifier,
    // so that it can be 100% certain that the returned value is always 0 or 1.
    // We use assembly so that it cannot be optimized out by a too smart compiler.
    asm("%0 &= 1" : "+r"(match));
    return match;
}

// This program is optional, and enables tracing on Android U+, 5.8+ on user builds.
DEFINE_BPF_PROG_EXT("cgroupskb/ingress/stats$trace_user", AID_ROOT, AID_SYSTEM,
                    bpf_cgroup_ingress_trace_user, KVER_5_8, KVER_INF,
                    BPFLOADER_U_VERSION, BPFLOADER_MAX_VER, OPTIONAL,
                    "fs_bpf_netd_readonly", "",
                    IGNORE_ON_ENG, LOAD_ON_USER, IGNORE_ON_USERDEBUG)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER_5_8);
}

// This program is required, and enables tracing on Android U+, 5.8+, userdebug/eng.
DEFINE_BPF_PROG_EXT("cgroupskb/ingress/stats$trace", AID_ROOT, AID_SYSTEM,
                    bpf_cgroup_ingress_trace, KVER_5_8, KVER_INF,
                    BPFLOADER_U_VERSION, BPFLOADER_MAX_VER, MANDATORY,
                    "fs_bpf_netd_readonly", "",
                    LOAD_ON_ENG, IGNORE_ON_USER, LOAD_ON_USERDEBUG)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER_5_8);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_19", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_ingress_4_19, KVER_4_19, KVER_INF)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_4_19);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_14", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_ingress_4_14, KVER_NONE, KVER_4_19)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_NONE);
}

// This program is optional, and enables tracing on Android U+, 5.8+ on user builds.
DEFINE_BPF_PROG_EXT("cgroupskb/egress/stats$trace_user", AID_ROOT, AID_SYSTEM,
                    bpf_cgroup_egress_trace_user, KVER_5_8, KVER_INF,
                    BPFLOADER_U_VERSION, BPFLOADER_MAX_VER, OPTIONAL,
                    "fs_bpf_netd_readonly", "",
                    IGNORE_ON_ENG, LOAD_ON_USER, IGNORE_ON_USERDEBUG)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER_5_8);
}

// This program is required, and enables tracing on Android U+, 5.8+, userdebug/eng.
DEFINE_BPF_PROG_EXT("cgroupskb/egress/stats$trace", AID_ROOT, AID_SYSTEM,
                    bpf_cgroup_egress_trace, KVER_5_8, KVER_INF,
                    BPFLOADER_U_VERSION, BPFLOADER_MAX_VER, MANDATORY,
                    "fs_bpf_netd_readonly", "",
                    LOAD_ON_ENG, IGNORE_ON_USER, LOAD_ON_USERDEBUG)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER_5_8);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_19", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_egress_4_19, KVER_4_19, KVER_INF)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_4_19);
}

DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_14", AID_ROOT, AID_SYSTEM,
                                bpf_cgroup_egress_4_14, KVER_NONE, KVER_4_19)
(struct __sk_buff* skb) {
    return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_NONE);
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/egress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_egress_prog)
(struct __sk_buff* skb) {
    // The clat daemon does not generate new traffic; all its traffic is accounted for
    // already on the v4-* interfaces (except for the 20 (or 28) extra bytes of IPv6 vs
    // IPv4 overhead, but that can be corrected for later when merging v4-foo stats into
    // interface foo's).
    // CLAT sockets are created by system server and tagged as uid CLAT, see tagSocketAsClat()
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (sock_uid == AID_SYSTEM) {
        uint64_t cookie = bpf_get_socket_cookie(skb);
        UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
        if (utag && utag->uid == AID_CLAT) return BPF_NOMATCH;
    }

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, &key, EGRESS, KVER_NONE);
    return BPF_MATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingress_prog)
(struct __sk_buff* skb) {
    // Clat daemon traffic is not accounted for here, by virtue of the iptables raw
    // prerouting drop rule (in the clat_raw_PREROUTING chain), which triggers before this
    // (in the bw_raw_PREROUTING chain).
    // It will be accounted for on the v4-* clat interface instead.
    // Keep that in mind when moving this out of iptables xt_bpf and into tc ingress (or xdp).

    uint32_t key = skb->ifindex;
    update_iface_stats_map(skb, &key, INGRESS, KVER_NONE);
    return BPF_MATCH;
}

DEFINE_SYS_BPF_PROG("schedact/ingress/account", AID_ROOT, AID_NET_ADMIN,
                    tc_bpf_ingress_account_prog)
(struct __sk_buff* skb) {
    if (is_received_skb(skb)) {
        // Account for ingress traffic before tc drops it.
        uint32_t key = skb->ifindex;
        update_iface_stats_map(skb, &key, INGRESS, KVER_NONE);
    }
    return TC_ACT_UNSPEC;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    if (is_system_uid(sock_uid)) return BPF_MATCH;

    // kernel's DEFAULT_OVERFLOWUID is 65534, this is the overflow 'nobody' uid,
    // usually this being returned means that skb->sk is NULL during RX
    // (early decap socket lookup failure), which commonly happens for incoming
    // packets to an unconnected udp socket.
    // But it can also happen for egress from a timewait socket.
    // Let's treat such cases as 'root', which passes is_system_uid().
    if (sock_uid == 65534) return BPF_MATCH;

    UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

// WARNING: Android T's non-updatable netd depends on the name of this program.
DEFINE_XTBPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
    uint32_t sock_uid = bpf_get_socket_uid(skb);
    UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
    uint32_t penalty_box = PENALTY_BOX_USER_MATCH | PENALTY_BOX_ADMIN_MATCH;
    if (denylistMatch) return denylistMatch->rule & penalty_box ? BPF_MATCH : BPF_NOMATCH;
    return BPF_NOMATCH;
}

static __always_inline inline uint8_t get_app_permissions() {
    uint64_t gid_uid = bpf_get_current_uid_gid();
    /*
     * A given app is guaranteed to have the same app ID in all the profiles in
     * which it is installed, and install permissions are granted to an app for
     * all users at install time, so we only check the appId part of a request
     * uid at run time. See UserHandle#isSameApp for details.
     */
    uint32_t appId = (gid_uid & 0xffffffff) % AID_USER_OFFSET;  // == PER_USER_RANGE == 100000
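    // For example (illustrative values): uid 1010005 (user 10, appId 10005) and
    // uid 10005 (user 0, appId 10005) both map to appId 10005.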
    uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
    // if UID not in map, then default to just INTERNET permission.
    return permissions ? *permissions : BPF_PERMISSION_INTERNET;
}

DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet_create", AID_ROOT, AID_ROOT, inet_socket_create,
                          KVER_4_14)
(struct bpf_sock* sk) {
    // A return value of 1 means allow, everything else means deny.
    return (get_app_permissions() & BPF_PERMISSION_INTERNET) ? 1 : 0;
}

DEFINE_NETD_V_BPF_PROG_KVER("cgroupsockrelease/inet_release", AID_ROOT, AID_ROOT,
                            inet_socket_release, KVER_5_10)
(struct bpf_sock* sk) {
    uint64_t cookie = bpf_get_sk_cookie(sk);
    if (cookie) bpf_cookie_tag_map_delete_elem(&cookie);

    return 1;
}

static __always_inline inline int check_localhost(struct bpf_sock_addr *ctx) {
    // See include/uapi/linux/bpf.h:
    //
    // struct bpf_sock_addr {
    //   __u32 user_family;	//     R: 4 byte
    //   __u32 user_ip4;	// BE, R: 1,2,4-byte,   W: 4-byte
    //   __u32 user_ip6[4];	// BE, R: 1,2,4,8-byte, W: 4,8-byte
    //   __u32 user_port;	// BE, R: 1,2,4-byte,   W: 4-byte
    //   __u32 family;		//     R: 4 byte
    //   __u32 type;		//     R: 4 byte
    //   __u32 protocol;	//     R: 4 byte
    //   __u32 msg_src_ip4;	// BE, R: 1,2,4-byte,   W: 4-byte
    //   __u32 msg_src_ip6[4];	// BE, R: 1,2,4,8-byte, W: 4,8-byte
    //   __bpf_md_ptr(struct bpf_sock *, sk);
    // };
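    //
    // For now this hook is a no-op: every address is allowed.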
    return 1;
}

DEFINE_NETD_V_BPF_PROG_KVER("connect4/inet4_connect", AID_ROOT, AID_ROOT, inet4_connect, KVER_4_14)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("connect6/inet6_connect", AID_ROOT, AID_ROOT, inet6_connect, KVER_4_14)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("recvmsg4/udp4_recvmsg", AID_ROOT, AID_ROOT, udp4_recvmsg, KVER_4_14)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("recvmsg6/udp6_recvmsg", AID_ROOT, AID_ROOT, udp6_recvmsg, KVER_4_14)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("sendmsg4/udp4_sendmsg", AID_ROOT, AID_ROOT, udp4_sendmsg, KVER_4_14)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("sendmsg6/udp6_sendmsg", AID_ROOT, AID_ROOT, udp6_sendmsg, KVER_4_14)
(struct bpf_sock_addr *ctx) {
    return check_localhost(ctx);
}

DEFINE_NETD_V_BPF_PROG_KVER("getsockopt/prog", AID_ROOT, AID_ROOT, getsockopt_prog, KVER_5_4)
(struct bpf_sockopt *ctx) {
    // Tell the kernel to return the 'original' kernel reply (instead of the bpf modified buffer).
    // This is important if the answer is larger than PAGE_SIZE (the max this bpf hook can provide).
    ctx->optlen = 0;
    return 1; // ALLOW
}

DEFINE_NETD_V_BPF_PROG_KVER("setsockopt/prog", AID_ROOT, AID_ROOT, setsockopt_prog, KVER_5_4)
(struct bpf_sockopt *ctx) {
    // Tell the kernel to use/process the original buffer provided by userspace.
    // This is important if it is larger than PAGE_SIZE (the max this bpf hook can handle).
    ctx->optlen = 0;
    return 1; // ALLOW
}

LICENSE("Apache 2.0");
CRITICAL("Connectivity and netd");
DISABLE_BTF_ON_USER_BUILDS();
DISABLE_ON_MAINLINE_BEFORE_U_QPR3();