Diffstat (limited to 'cmd/bpf')
-rw-r--r--  cmd/bpf/bpf_bpfel.go  125
-rw-r--r--  cmd/bpf/bpf_bpfel.o   bin 0 -> 5312 bytes
-rw-r--r--  cmd/bpf/bpf_usage.c    73
-rw-r--r--  cmd/bpf/gen.go          3
-rw-r--r--  cmd/bpf/main.go        96
5 files changed, 297 insertions, 0 deletions
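The pieces fit together as follows: gen.go invokes bpf2go, which compiles bpf_usage.c into the embedded object bpf_bpfel.o and generates the loader scaffolding in bpf_bpfel.go; main.go then attaches the two TC programs and periodically dumps both usage maps. Because the maps key on the raw IPv4 address, a single counter can also be read directly. A minimal sketch, assuming the objects are already loaded via loadBpfObjects (lookupUsage is an illustrative name, not part of this commit):

package main

import (
	"net/netip"

	"github.com/cilium/ebpf"
)

// lookupUsage reads the byte counter for one IPv4 address from a usage map.
// netip.Addr implements encoding.BinaryMarshaler, and a plain IPv4 address
// marshals to 4 bytes, matching the __u32 key declared in bpf_usage.c.
// A 4-in-6 mapped address would marshal to 16 bytes and fail the lookup,
// hence the Unmap().
func lookupUsage(m *ebpf.Map, addr netip.Addr) (uint64, error) {
	var total uint64
	if err := m.Lookup(addr.Unmap(), &total); err != nil {
		return 0, err
	}
	return total, nil
}

// Example (inside main, after loadBpfObjects):
//
//	down, err := lookupUsage(objs.IngressIp4UsageMap, netip.MustParseAddr("192.0.2.1"))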
diff --git a/cmd/bpf/bpf_bpfel.go b/cmd/bpf/bpf_bpfel.go
new file mode 100644
index 0000000..1efd705
--- /dev/null
+++ b/cmd/bpf/bpf_bpfel.go
@@ -0,0 +1,125 @@
+// Code generated by bpf2go; DO NOT EDIT.
+//go:build 386 || amd64 || arm || arm64 || loong64 || mips64le || mipsle || ppc64le || riscv64
+
+package main
+
+import (
+	"bytes"
+	_ "embed"
+	"fmt"
+	"io"
+
+	"github.com/cilium/ebpf"
+)
+
+// loadBpf returns the embedded CollectionSpec for bpf.
+func loadBpf() (*ebpf.CollectionSpec, error) {
+	reader := bytes.NewReader(_BpfBytes)
+	spec, err := ebpf.LoadCollectionSpecFromReader(reader)
+	if err != nil {
+		return nil, fmt.Errorf("can't load bpf: %w", err)
+	}
+
+	return spec, err
+}
+
+// loadBpfObjects loads bpf and converts it into a struct.
+//
+// The following types are suitable as obj argument:
+//
+//	*bpfObjects
+//	*bpfPrograms
+//	*bpfMaps
+//
+// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
+func loadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
+	spec, err := loadBpf()
+	if err != nil {
+		return err
+	}
+
+	return spec.LoadAndAssign(obj, opts)
+}
+
+// bpfSpecs contains maps and programs before they are loaded into the kernel.
+//
+// It can be passed to ebpf.CollectionSpec.Assign.
+type bpfSpecs struct {
+	bpfProgramSpecs
+	bpfMapSpecs
+}
+
+// bpfProgramSpecs contains programs before they are loaded into the kernel.
+//
+// It can be passed to ebpf.CollectionSpec.Assign.
+type bpfProgramSpecs struct {
+	EgressFunc  *ebpf.ProgramSpec `ebpf:"egress_func"`
+	IngressFunc *ebpf.ProgramSpec `ebpf:"ingress_func"`
+}
+
+// bpfMapSpecs contains maps before they are loaded into the kernel.
+//
+// It can be passed to ebpf.CollectionSpec.Assign.
+type bpfMapSpecs struct {
+	EgressIp4UsageMap  *ebpf.MapSpec `ebpf:"egress_ip4_usage_map"`
+	IngressIp4UsageMap *ebpf.MapSpec `ebpf:"ingress_ip4_usage_map"`
+}
+
+// bpfObjects contains all objects after they have been loaded into the kernel.
+//
+// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type bpfObjects struct {
+	bpfPrograms
+	bpfMaps
+}
+
+func (o *bpfObjects) Close() error {
+	return _BpfClose(
+		&o.bpfPrograms,
+		&o.bpfMaps,
+	)
+}
+
+// bpfMaps contains all maps after they have been loaded into the kernel.
+//
+// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
type bpfMaps struct {
+	EgressIp4UsageMap  *ebpf.Map `ebpf:"egress_ip4_usage_map"`
+	IngressIp4UsageMap *ebpf.Map `ebpf:"ingress_ip4_usage_map"`
+}
+
+func (m *bpfMaps) Close() error {
+	return _BpfClose(
+		m.EgressIp4UsageMap,
+		m.IngressIp4UsageMap,
+	)
+}
+
+// bpfPrograms contains all programs after they have been loaded into the kernel.
+//
+// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign.
+type bpfPrograms struct {
+	EgressFunc  *ebpf.Program `ebpf:"egress_func"`
+	IngressFunc *ebpf.Program `ebpf:"ingress_func"`
+}
+
+func (p *bpfPrograms) Close() error {
+	return _BpfClose(
+		p.EgressFunc,
+		p.IngressFunc,
+	)
+}
+
+func _BpfClose(closers ...io.Closer) error {
+	for _, closer := range closers {
+		if err := closer.Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Do not access this directly.
+//
+//go:embed bpf_bpfel.o
+var _BpfBytes []byte
diff --git a/cmd/bpf/bpf_bpfel.o b/cmd/bpf/bpf_bpfel.o
new file mode 100644
index 0000000..789c00f
--- /dev/null
+++ b/cmd/bpf/bpf_bpfel.o
Binary files differ
diff --git a/cmd/bpf/bpf_usage.c b/cmd/bpf/bpf_usage.c
new file mode 100644
index 0000000..5e505da
--- /dev/null
+++ b/cmd/bpf/bpf_usage.c
@@ -0,0 +1,73 @@
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+#define MAX_MAP_ENTRIES 4096
+
+char __license[] SEC("license") = "GPL";
+
+struct {
+	__uint(type, BPF_MAP_TYPE_LRU_HASH);
+	__uint(max_entries, MAX_MAP_ENTRIES);
+	__type(key, __u32);   // source IPv4 address
+	__type(value, __u64); // number of bytes
+} ingress_ip4_usage_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_LRU_HASH);
+	__uint(max_entries, MAX_MAP_ENTRIES);
+	__type(key, __u32);   // destination IPv4 address
+	__type(value, __u64); // number of bytes
+} egress_ip4_usage_map SEC(".maps");
+
+typedef enum {
+	UPDATE_USAGE_INGRESS,
+	UPDATE_USAGE_EGRESS,
+} update_usage_t;
+
+static __always_inline int update_usage(void *map, struct __sk_buff *skb,
+					update_usage_t traffic)
+{
+	__u32 ip4;
+	__u64 len, *usage;
+
+	void *data_end = (void *)(long)skb->data_end;
+	void *data = (void *)(long)skb->data;
+	struct iphdr *ip = data + sizeof(struct ethhdr);
+
+	if (skb->protocol != bpf_htons(ETH_P_IP))
+		return TCX_PASS;
+	if ((void *)(ip + 1) > data_end)
+		return TCX_PASS;
+
+	if (traffic == UPDATE_USAGE_INGRESS)
+		ip4 = ip->saddr;
+	else
+		ip4 = ip->daddr;
+	len = skb->len - sizeof(struct ethhdr);
+
+	usage = bpf_map_lookup_elem(map, &ip4);
+	if (!usage) {
+		/* No entry in the map for this IP address yet. */
+		bpf_map_update_elem(map, &ip4, &len, BPF_ANY);
+	} else {
+		__sync_fetch_and_add(usage, len);
+	}
+
+	return TCX_PASS;
+}
+
+SEC("tc")
+int ingress_func(struct __sk_buff *skb)
+{
+	return update_usage(&ingress_ip4_usage_map, skb, UPDATE_USAGE_INGRESS);
+}
+
+SEC("tc")
+int egress_func(struct __sk_buff *skb)
+{
+	return update_usage(&egress_ip4_usage_map, skb, UPDATE_USAGE_EGRESS);
+}
diff --git a/cmd/bpf/gen.go b/cmd/bpf/gen.go
new file mode 100644
index 0000000..c90c0a7
--- /dev/null
+++ b/cmd/bpf/gen.go
@@ -0,0 +1,3 @@
+package main
+
+//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -target bpfel bpf bpf_usage.c
diff --git a/cmd/bpf/main.go b/cmd/bpf/main.go
new file mode 100644
index 0000000..5a5f842
--- /dev/null
+++ b/cmd/bpf/main.go
@@ -0,0 +1,96 @@
+package main
+
+import (
+	"fmt"
+	"log"
+	"net"
+	"net/netip"
+	"os"
+	"time"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/link"
+)
+
+type Usage struct {
+	ingress uint64
+	egress  uint64
+}
+
+func main() {
+	if len(os.Args) < 2 {
+		log.Fatal("please specify a network interface")
+	}
+
+	ifaceName := os.Args[1]
+	iface, err := net.InterfaceByName(ifaceName)
+	if err != nil {
+		log.Fatalf("lookup network iface %q: %s", ifaceName, err)
+	}
+
+	// Load pre-compiled programs into the kernel.
+	objs := bpfObjects{}
+	if err := loadBpfObjects(&objs, nil); err != nil {
+		log.Fatalf("loading objects: %s", err)
+	}
+	defer objs.Close()
+
+	// Attach the program to ingress TC.
+	ingressLink, err := link.AttachTCX(link.TCXOptions{
+		Interface: iface.Index,
+		Program:   objs.IngressFunc,
+		Attach:    ebpf.AttachTCXIngress,
+	})
+	if err != nil {
+		log.Fatalf("could not attach TCX program to ingress: %s", err)
+	}
+	defer ingressLink.Close()
+
+	// Attach the program to egress TC.
+	egressLink, err := link.AttachTCX(link.TCXOptions{
+		Interface: iface.Index,
+		Program:   objs.EgressFunc,
+		Attach:    ebpf.AttachTCXEgress,
+	})
+	if err != nil {
+		log.Fatalf("could not attach TCX program to egress: %s", err)
+	}
+	defer egressLink.Close()
+
+	ticker := time.NewTicker(1 * time.Second)
+	defer ticker.Stop()
+	for range ticker.C {
+		prettyPrint(objs.IngressIp4UsageMap, objs.EgressIp4UsageMap)
+	}
+}
+
+func prettyPrint(ingress *ebpf.Map, egress *ebpf.Map) {
+	ipUsage := make(map[netip.Addr]Usage)
+	var key netip.Addr
+	var value uint64
+
+	iter := ingress.Iterate()
+	for iter.Next(&key, &value) {
+		ipUsage[key] = Usage{
+			ingress: value,
+		}
+	}
+
+	iter = egress.Iterate()
+	for iter.Next(&key, &value) {
+		usage, ok := ipUsage[key]
+		if ok {
+			usage.egress = value
+		} else {
+			usage = Usage{egress: value}
+		}
+
+		ipUsage[key] = usage
+	}
+
+	fmt.Print("\033[H\033[2J")
+	fmt.Printf("%15s\t%16s\t%16s\n", "ip", "down", "up")
+	for ip4, usage := range ipUsage {
+		fmt.Printf("%15s\t%16d\t%16d\n", ip4, usage.ingress, usage.egress)
+	}
+}
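One caveat in prettyPrint above: Go map iteration order is effectively random, so rows jump around between ticks, and the iterator's Err() is never consulted, so a dump cut short by concurrent map updates is silently treated as complete. A minimal sketch of a stable ordering, in the same package (usageRow and sortedRows are hypothetical names, not part of this commit; Usage is the struct from main.go):

package main

import (
	"net/netip"
	"sort"
)

// usageRow pairs an address with its Usage so the table can be sorted.
type usageRow struct {
	ip    netip.Addr
	usage Usage
}

// sortedRows flattens the per-IP map and orders it by total bytes,
// heaviest talkers first, so successive ticks render a stable table.
func sortedRows(ipUsage map[netip.Addr]Usage) []usageRow {
	rows := make([]usageRow, 0, len(ipUsage))
	for ip, u := range ipUsage {
		rows = append(rows, usageRow{ip: ip, usage: u})
	}
	sort.Slice(rows, func(i, j int) bool {
		return rows[i].usage.ingress+rows[i].usage.egress >
			rows[j].usage.ingress+rows[j].usage.egress
	})
	return rows
}

prettyPrint would then range over sortedRows(ipUsage) instead of the map itself.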