Skip to content

Commit

Permalink
Try to adapt the tracer for a normal (non-Go) C process
Browse files Browse the repository at this point in the history
  • Loading branch information
jschwinger233 committed Jul 14, 2022
1 parent 88fda8d commit 865dd6b
Show file tree
Hide file tree
Showing 10 changed files with 205 additions and 69 deletions.
45 changes: 39 additions & 6 deletions internal/bpf/bpf.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package bpf

import (
"context"
"fmt"
"io"
"time"

Expand Down Expand Up @@ -37,9 +38,14 @@ func (b *BPF) Attach(bin string, uprobes []symparser.Uprobe) (err error) {
for _, uprobe := range uprobes {
switch uprobe.Location {
case symparser.AtEntry:
err = b.AttachEntry(bin, uprobe.Offset)
case symparser.AtExit:
err = b.AttachExit(bin, uprobe.Offset)
err = b.AttachUprobe(bin, uprobe.Offset)
if err == nil {
err = b.AttachUretprobe(bin, uprobe.Offset)
}
case symparser.AtRet:
err = b.AttachRet(bin, uprobe.Offset)
case symparser.AtFramePointer:
err = b.AttachFramePointer(bin, uprobe.Offset)
}
if err != nil {
return
Expand All @@ -57,7 +63,7 @@ func (b *BPF) OpenExecutable(bin string) (_ *link.Executable, err error) {
return b.executables[bin], nil
}

func (b *BPF) AttachEntry(bin string, offset uint64) (err error) {
func (b *BPF) AttachUprobe(bin string, offset uint64) (err error) {
ex, err := b.OpenExecutable(bin)
if err != nil {
return
Expand All @@ -70,12 +76,39 @@ func (b *BPF) AttachEntry(bin string, offset uint64) (err error) {
return
}

func (b *BPF) AttachExit(bin string, offset uint64) (err error) {
// AttachUretprobe attaches the OnExit BPF program as a uretprobe at the
// given offset within bin. The created link is recorded in b.closers so it
// can be detached later.
func (b *BPF) AttachUretprobe(bin string, offset uint64) (err error) {
	var ex *link.Executable
	if ex, err = b.OpenExecutable(bin); err != nil {
		return
	}
	// NOTE(review): leftover debug output — consider removing or routing
	// through the project logger.
	fmt.Printf("uretprobe: %x\n", offset)
	opts := &link.UprobeOptions{Offset: offset}
	up, err := ex.Uretprobe("", b.objs.OnExit, opts)
	if err != nil {
		return err
	}
	b.closers = append(b.closers, up)
	return nil
}

// AttachFramePointer attaches the OnEntryGolang BPF program as a uprobe at
// the given offset within bin (the point where the frame pointer has been
// established). The created link is recorded in b.closers for cleanup.
func (b *BPF) AttachFramePointer(bin string, offset uint64) (err error) {
	var ex *link.Executable
	if ex, err = b.OpenExecutable(bin); err != nil {
		return
	}
	opts := &link.UprobeOptions{Offset: offset}
	up, err := ex.Uprobe("", b.objs.OnEntryGolang, opts)
	if err != nil {
		return err
	}
	b.closers = append(b.closers, up)
	return nil
}

func (b *BPF) AttachRet(bin string, offset uint64) (err error) {
ex, err := b.OpenExecutable(bin)
if err != nil {
return
}
uprobe, err := ex.Uprobe("", b.objs.OnExit, &link.UprobeOptions{Offset: offset})
uprobe, err := ex.Uprobe("", b.objs.OnExitGolang, &link.UprobeOptions{Offset: offset})
if err != nil {
return err
}
Expand Down
95 changes: 72 additions & 23 deletions internal/bpf/ufuncgraph.c
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
char __license[] SEC("license") = "Dual MIT/GPL";

struct event {
__u64 goid;
__u64 stack_id;
__u64 caller_ip;
__u64 ip;
__u64 time_ns;
Expand All @@ -25,7 +25,7 @@ const struct event *unused __attribute__((unused));
struct stackwalk {
__u64 depth;
__u64 root_bp;
__u64 goid;
__u64 stack_id;
};

// event_queue is for events commit
Expand All @@ -36,7 +36,7 @@ struct bpf_map_def SEC("maps") event_queue = {
.max_entries = 1000000,
};

// goids is for goid generation
// goids is for stack_id generation
struct bpf_map_def SEC("maps") goids = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
Expand All @@ -54,29 +54,29 @@ struct bpf_map_def SEC("maps") bp_to_event = {

// backtrace walks the frame-pointer chain starting at bp.
//
// Each iteration reads the saved caller BP from *bp. On return,
// walk->root_bp holds the deepest (outermost) frame pointer that was still
// readable, and walk->stack_id holds the word read past the last valid
// frame — the slot this tool reuses as a per-stack identifier.
//
// Failure modes:
//   - if *bp is unreadable, walk->stack_id is set to bp itself and the walk
//     stops at the depth reached so far;
//   - if MAX_STACK_LAYERS frames are walked without terminating, walk->depth
//     is set to the sentinel 0xffffffffffffffff so callers can flag an
//     overly deep (or corrupt) stack.
static void backtrace(__u64 bp, struct stackwalk *walk) {
	for (walk->depth = 0; walk->depth < MAX_STACK_LAYERS; walk->depth++) {
		if (bpf_probe_read_user(&walk->stack_id, sizeof(walk->stack_id), (void*)bp) < 0) {
			walk->stack_id = bp;
			return;
		}
		walk->root_bp = bp;
		bp = walk->stack_id;
	}
	walk->depth = 0xffffffffffffffff;
}

__u64 static next_goid() {
__u64 static new_stack_id() {
__u32 key = 0;
__u32 *goid = bpf_map_lookup_elem(&goids, &key);;
if (!goid)
__u32 *stack_id = bpf_map_lookup_elem(&goids, &key);;
if (!stack_id)
return 0; // should not happen
(*goid)++;
(*stack_id)++;
__u32 cpu = bpf_get_smp_processor_id();
return (*goid) | ((__u64)cpu << 32);
return (*stack_id) | ((__u64)cpu << 32);
}

SEC("uprobe/on_entry")
int on_entry(struct pt_regs *ctx) {
SEC("uprobe/on_entry_golang")
int on_entry_golang(struct pt_regs *ctx) {
struct event this_event;
__builtin_memset(&this_event, 0, sizeof(this_event));

Expand All @@ -97,7 +97,7 @@ int on_entry(struct pt_regs *ctx) {
bpf_probe_read_user(&caller_bp, sizeof(caller_bp), (void*)ctx->rbp);
struct event *caller_event = bpf_map_lookup_elem(&bp_to_event, &caller_bp);
if (caller_event) {
this_event.goid = caller_event->goid;
this_event.stack_id = caller_event->stack_id;
this_event.stack_depth = caller_event->stack_depth + 1;
goto submit_event;
}
Expand All @@ -106,15 +106,15 @@ int on_entry(struct pt_regs *ctx) {
__builtin_memset(&walk, 0, sizeof(walk));
backtrace(ctx->rbp, &walk);
this_event.stack_depth = walk.depth;
this_event.goid = walk.goid;
this_event.stack_id = walk.stack_id;
if (walk.depth == 0xffffffffffffffff) {
this_event.errno = 2;
goto submit_event;
}

if (this_event.goid == 0) {
this_event.goid = next_goid();
bpf_probe_write_user((void*)walk.root_bp, &this_event.goid, sizeof(this_event.goid));
if (this_event.stack_id == 0) {
this_event.stack_id = new_stack_id();
bpf_probe_write_user((void*)walk.root_bp, &this_event.stack_id, sizeof(this_event.stack_id));
goto submit_event;
}

Expand All @@ -124,8 +124,8 @@ int on_entry(struct pt_regs *ctx) {
return 0;
}

SEC("uprobe/on_exit")
int on_exit(struct pt_regs *ctx) {
SEC("uprobe/on_exit_golang")
int on_exit_golang(struct pt_regs *ctx) {
struct event this_event;
__builtin_memset(&this_event, 0, sizeof(this_event));
this_event.hook_point = 1;
Expand All @@ -135,7 +135,7 @@ int on_exit(struct pt_regs *ctx) {
__u64 this_bp = ctx->rsp - 8;
struct event *entry_event = bpf_map_lookup_elem(&bp_to_event, &this_bp);
if (entry_event) {
this_event.goid = entry_event->goid;
this_event.stack_id = entry_event->stack_id;
this_event.stack_depth = entry_event->stack_depth;
goto submit_event;
}
Expand All @@ -144,13 +144,13 @@ int on_exit(struct pt_regs *ctx) {
__builtin_memset(&walk, 0, sizeof(walk));
backtrace(this_bp, &walk);
this_event.stack_depth = walk.depth;
this_event.goid = walk.goid;
this_event.stack_id = walk.stack_id;
if (walk.depth == 0xffffffffffffffff) {
this_event.errno = 2;
goto submit_event;
}

if (this_event.goid == 0) {
if (this_event.stack_id == 0) {
return 0; // dangling exit, do nothing
}

Expand All @@ -159,3 +159,52 @@ int on_exit(struct pt_regs *ctx) {
bpf_map_push_elem(&event_queue, &this_event, BPF_EXIST);
return 0;
}

// on_entry handles function entry for a plain C (frame-pointer) process.
//
// It records an entry event keyed by this frame's BP (rsp - 8, since rsp
// points at the return address on entry). If the caller's frame is already
// known in bp_to_event, the stack id and depth are inherited from it;
// otherwise a fresh stack id is allocated. The event is stored in
// bp_to_event for the matching uretprobe and pushed to event_queue.
SEC("uprobe/on_entry")
int on_entry(struct pt_regs *ctx) {
	struct event this_event;
	__builtin_memset(&this_event, 0, sizeof(this_event));
	this_event.hook_point = 0;
	this_event.ip = ctx->rip;
	// The return address sits at *rsp on function entry.
	bpf_probe_read_user(&this_event.caller_ip, sizeof(this_event.caller_ip), (void*)ctx->rsp);
	this_event.time_ns = bpf_ktime_get_ns();

	__u64 this_bp = ctx->rsp - 8;
	__u64 caller_bp = ctx->rbp;
	struct event *caller_event = bpf_map_lookup_elem(&bp_to_event, &caller_bp);
	if (caller_event) {
		// Nested call within an already-tracked stack: inherit id, go deeper.
		this_event.stack_id = caller_event->stack_id;
		this_event.stack_depth = caller_event->stack_depth + 1;
		goto submit_event;
	}

	// Root of a new tracked stack. new_stack_id may return 0 on map-lookup
	// failure; the exit side treats id 0 as dangling.
	this_event.stack_id = new_stack_id();

submit_event:
	bpf_map_update_elem(&bp_to_event, &this_bp, &this_event, BPF_ANY);
	bpf_map_push_elem(&event_queue, &this_event, BPF_EXIST);
	return 0;
}

// on_exit handles function return for a plain C (frame-pointer) process.
//
// At a uretprobe the return address has been popped, so the entry-time
// frame key is rsp - 16 (entry used rsp - 8 before the pop). If a matching
// entry event exists in bp_to_event, an exit event with the same stack id
// and depth is pushed to event_queue and the entry is deleted; otherwise
// the exit is dangling and silently dropped.
SEC("uretprobe/on_exit")
int on_exit(struct pt_regs *ctx) {
	struct event this_event;
	__builtin_memset(&this_event, 0, sizeof(this_event));
	this_event.hook_point = 1;
	this_event.time_ns = bpf_ktime_get_ns();

	__u64 this_bp = ctx->rsp - 16;
	struct event *entry_event = bpf_map_lookup_elem(&bp_to_event, &this_bp);
	if (entry_event) {
		this_event.stack_id = entry_event->stack_id;
		this_event.stack_depth = entry_event->stack_depth;
		bpf_map_delete_elem(&bp_to_event, &this_bp);
		bpf_map_push_elem(&event_queue, &this_event, BPF_EXIST);
	}

	return 0;
}
16 changes: 11 additions & 5 deletions internal/bpf/ufuncgraph_bpfel_x86.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Binary file modified internal/bpf/ufuncgraph_bpfel_x86.o
Binary file not shown.
26 changes: 13 additions & 13 deletions internal/eventhandler/gevent.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,24 +36,24 @@ func NewGevent(uprobes []symparser.Uprobe, symInterp *SymInterp) (_ *Gevent, err
}

// Add appends event to the per-stack event list and maintains the
// entry/exit balance counter for that stack.
//
// An exit event (HookPoint == 1) arriving for a stack with no recorded
// events is a dangling exit and is dropped. The balance counter in
// goroutine2stack moves by -2*HookPoint + 1: +1 on entry (HookPoint 0),
// -1 on exit (HookPoint 1), so it reaches zero once every recorded entry
// has been matched by an exit.
func (p *Gevent) Add(event bpf.UfuncgraphEvent) {
	length := len(p.goroutine2events[event.StackId])
	if length == 0 && event.HookPoint == 1 {
		return
	}
	// NOTE(review): a dedup check for consecutive entry events at the same
	// stack depth was disabled in this revision:
	//if length > 0 && event.HookPoint == 0 && p.goroutine2events[event.StackId][length-1].HookPoint == 0 && p.goroutine2events[event.StackId][length-1].StackDepth == event.StackDepth {
	//	return
	//}
	p.goroutine2events[event.StackId] = append(p.goroutine2events[event.StackId], event)
	p.goroutine2stack[event.StackId] = p.goroutine2stack[event.StackId] - 2*uint64(event.HookPoint) + 1
}

// Completed reports whether every entry event recorded for the stack of
// event has been matched by an exit event (its balance counter is zero).
func (p *Gevent) Completed(event bpf.UfuncgraphEvent) bool {
	return p.goroutine2stack[event.StackId] == 0
}

// Clear drops all bookkeeping for the stack of event: both its recorded
// event list and its entry/exit balance counter.
func (p *Gevent) Clear(event bpf.UfuncgraphEvent) {
	delete(p.goroutine2events, event.StackId)
	delete(p.goroutine2stack, event.StackId)
}

func (p *Gevent) IsRootEvent(event bpf.UfuncgraphEvent) bool {
Expand All @@ -75,10 +75,10 @@ func (p *Gevent) getRootEventSet() map[string]interface{} {
return p.cache["rootevents"].(map[string]interface{})
}

func (p *Gevent) PrintStack(goid uint64) {
func (p *Gevent) PrintStack(StackId uint64) {
ident := ""
println()
for _, event := range p.goroutine2events[goid] {
for _, event := range p.goroutine2events[StackId] {
t := p.bootTime.Add(time.Duration(event.TimeNs)).Format("2006-01-02 15:04:05.0000")
if event.HookPoint == 0 {
fmt.Printf("%s %s %s { %s\n", t, ident, p.symInterp.Interp(event.Ip, withOffset(false)), p.symInterp.Interp(event.CallerIp, withOffset(true)))
Expand All @@ -95,7 +95,7 @@ func (p *Gevent) PrintStack(goid uint64) {
}

func (g *Gevent) PrintAll() {
for goid := range g.goroutine2events {
g.PrintStack(goid)
for StackId := range g.goroutine2events {
g.PrintStack(StackId)
}
}
5 changes: 3 additions & 2 deletions internal/eventhandler/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,9 @@ func (h *EventHandler) Handle(ctx context.Context, ch chan bpf.UfuncgraphEvent,
gevent.Add(event)
log.Debugf("add event: %+v", event)
if gevent.Completed(event) {
if gevent.IsRootEvent(event) {
gevent.PrintStack(event.Goid)
if true {
//if gevent.IsRootEvent(event) {
gevent.PrintStack(event.StackId)
}
gevent.Clear(event)
}
Expand Down
Loading

0 comments on commit 865dd6b

Please sign in to comment.