On Tue, Dec 13, 2016 at 11:24 AM, Alexei Starovoitov
<[email protected]> wrote:
> On Sun, Dec 11, 2016 at 5:27 PM, Ming Lei via iovisor-dev
> <[email protected]> wrote:
>> On Sun, Dec 11, 2016 at 9:21 AM, Chaiken, Alison via iovisor-dev
>> <[email protected]> wrote:
>>> Ming Lei <[email protected]> inquires:
>>>
>>> Bcc can be installed on ubuntu 16.04/arm64 successfully, but when
>>> I try to trace, the following failure[1] is triggered.
>>> So is bcc not ready for arm64? Or something is wrong?
>>
>> Also for the prog building failure of 'LLVM ERROR: Inline asm', even though
>> it is worked around on x86, maybe one day someone will change the kernel
>> headers under arch/ a bit and cause a similar issue with arm64 too.
>>
>> So is there one generic solution for the issue? Is it possible to
>> preprocess the source file first and replace asm() with noop
>> or bpf_trace_printk(WARN)? Or other solutions?
>
> not sure what the best approach is here.
> we can try to ignore inline asm in the llvm during codegen, but
> since bpf backend was asked to emit such code, most likely it means
> that this asm is used somewhere so even if we ignore it, the generated

Looks like the inline asm isn't used/called in the prog, but it is still generated.

> code probably will be broken anyway and won't pass the verifier.
> Is there a way to reproduce it without bcc?

samples/bpf/ has been broken in other ways.

> so can you send me a pre-processed .c file or, even better, a .ll file?
> I don't have arm64 to reproduce it...

Please see the whole log of building the prog.



Thanks,
Ming Lei
ubuntu@ubuntu:~/git/bcc/tools$ sudo ./trace.py sys_open
clang -cc1 -triple aarch64-unknown-linux-gnu -emit-llvm-bc -emit-llvm-uselists -disable-free -disable-llvm-verifier -main-file-name main.c -mrelocation-model static -mthread-model posix -mdisable-fp-elim -fmath-errno -masm-verbose -mconstructor-aliases -fuse-init-array -target-cpu generic -target-feature +neon -target-abi aapcs -dwarf-column-info -debugger-tuning=gdb -coverage-file /usr/src/linux-headers-4.8.0-30-generic/main.c -nostdsysteminc -nobuiltininc -resource-dir ../lib/clang/3.8.0 -isystem /virtual/lib/clang/include -include ./include/linux/kconfig.h -include /virtual/include/bcc/bpf.h -include /virtual/include/bcc/helpers.h -isystem /virtual/include -I /home/ubuntu/git/bcc/tools -I ./arch/arm64/include -I arch/arm64/include/generated/uapi -I arch/arm64/include/generated -I include -I ./arch/arm64/include/uapi -I arch/arm64/include/generated/uapi -I ./include/uapi -I include/generated/uapi -D __KERNEL__ -D __HAVE_BUILTIN_BSWAP16__ -D __HAVE_BUILTIN_BSWAP32__ -D __HAVE_BUILTIN_BSWAP64__ -Wno-deprecated-declarations -Wno-gnu-variable-sized-type-not-at-end -Wno-unused-value -Wno-pointer-sign -fdebug-compilation-dir /usr/src/linux-headers-4.8.0-30-generic -ferror-limit 19 -fmessage-length 80 -fallow-half-arguments-and-returns -fno-signed-char -fobjc-runtime=gcc -fdiagnostics-show-option -o main.bc -x c /virtual/main.c
In file included from /virtual/main.c:2:
In file included from include/linux/ptrace.h:5:
In file included from include/linux/sched.h:17:
In file included from include/linux/kernel.h:13:
In file included from include/linux/printk.h:8:
In file included from include/linux/cache.h:5:
In file included from ./arch/arm64/include/asm/cache.h:19:
In file included from ./arch/arm64/include/asm/cachetype.h:19:
In file included from ./arch/arm64/include/asm/cputype.h:94:
./arch/arm64/include/asm/sysreg.h:260:43: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
        asm volatile("mrs %0, sctlr_el1" : "=r" (val));
                                                 ^
./arch/arm64/include/asm/sysreg.h:260:20: note: use constraint modifier "w"
        asm volatile("mrs %0, sctlr_el1" : "=r" (val));
                          ^~
                          %w0
./arch/arm64/include/asm/sysreg.h:263:44: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
        asm volatile("msr sctlr_el1, %0" : : "r" (val));
                                                  ^
./arch/arm64/include/asm/sysreg.h:263:31: note: use constraint modifier "w"
        asm volatile("msr sctlr_el1, %0" : : "r" (val));
                                     ^~
                                     %w0
In file included from /virtual/main.c:2:
In file included from include/linux/ptrace.h:5:
In file included from include/linux/sched.h:19:
In file included from include/linux/timex.h:65:
In file included from ./arch/arm64/include/asm/timex.h:19:
./arch/arm64/include/asm/arch_timer.h:41:50: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
                        asm volatile("msr cntp_ctl_el0,  %0" : : "r" (val));
                                                                      ^
./arch/arm64/include/asm/arch_timer.h:41:37: note: use constraint modifier "w"
                        asm volatile("msr cntp_ctl_el0,  %0" : : "r" (val));
                                                         ^~
                                                         %w0
./arch/arm64/include/asm/arch_timer.h:44:50: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
                        asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
                                                                      ^
./arch/arm64/include/asm/arch_timer.h:44:37: note: use constraint modifier "w"
                        asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
                                                         ^~
                                                         %w0
./arch/arm64/include/asm/arch_timer.h:50:50: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
                        asm volatile("msr cntv_ctl_el0,  %0" : : "r" (val));
                                                                      ^
./arch/arm64/include/asm/arch_timer.h:50:37: note: use constraint modifier "w"
                        asm volatile("msr cntv_ctl_el0,  %0" : : "r" (val));
                                                         ^~
                                                         %w0
./arch/arm64/include/asm/arch_timer.h:53:50: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
                        asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
                                                                      ^
./arch/arm64/include/asm/arch_timer.h:53:37: note: use constraint modifier "w"
                        asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
                                                         ^~
                                                         %w0
./arch/arm64/include/asm/arch_timer.h:69:49: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
                        asm volatile("mrs %0,  cntp_ctl_el0" : "=r" (val));
                                                                     ^
./arch/arm64/include/asm/arch_timer.h:69:22: note: use constraint modifier "w"
                        asm volatile("mrs %0,  cntp_ctl_el0" : "=r" (val));
                                          ^~
                                          %w0
./arch/arm64/include/asm/arch_timer.h:72:49: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
                        asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
                                                                     ^
./arch/arm64/include/asm/arch_timer.h:72:22: note: use constraint modifier "w"
                        asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
                                          ^~
                                          %w0
./arch/arm64/include/asm/arch_timer.h:78:49: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
                        asm volatile("mrs %0,  cntv_ctl_el0" : "=r" (val));
                                                                     ^
./arch/arm64/include/asm/arch_timer.h:78:22: note: use constraint modifier "w"
                        asm volatile("mrs %0,  cntv_ctl_el0" : "=r" (val));
                                          ^~
                                          %w0
./arch/arm64/include/asm/arch_timer.h:81:49: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
                        asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
                                                                     ^
./arch/arm64/include/asm/arch_timer.h:81:22: note: use constraint modifier "w"
                        asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
                                          ^~
                                          %w0
./arch/arm64/include/asm/arch_timer.h:92:46: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
        asm volatile("mrs %0,   cntfrq_el0" : "=r" (val));
                                                    ^
./arch/arm64/include/asm/arch_timer.h:92:20: note: use constraint modifier "w"
        asm volatile("mrs %0,   cntfrq_el0" : "=r" (val));
                          ^~
                          %w0
./arch/arm64/include/asm/arch_timer.h:99:45: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
        asm volatile("mrs       %0, cntkctl_el1" : "=r" (cntkctl));
                                                         ^
./arch/arm64/include/asm/arch_timer.h:99:20: note: use constraint modifier "w"
        asm volatile("mrs       %0, cntkctl_el1" : "=r" (cntkctl));
                                ^~
                                %w0
./arch/arm64/include/asm/arch_timer.h:105:46: warning: value size does not match
      register size specified by the constraint and modifier
      [-Wasm-operand-widths]
        asm volatile("msr       cntkctl_el1, %0" : : "r" (cntkctl));
                                                          ^
./arch/arm64/include/asm/arch_timer.h:105:33: note: use constraint modifier "w"
        asm volatile("msr       cntkctl_el1, %0" : : "r" (cntkctl));
                                             ^~
                                             %w0

#include <linux/ptrace.h>
#include <linux/sched.h>        /* For TASK_COMM_LEN */

struct probe_sys_open_1_data_t
{
        u64 timestamp_ns;
        u32 tgid;
        u32 pid;
        char comm[TASK_COMM_LEN];



};

BPF_PERF_OUTPUT(probe_sys_open_1_events);


__attribute__((section(".bpf.fn.probe_sys_open_1")))
int probe_sys_open_1(struct pt_regs *ctx)
{
        u64 __pid_tgid = bpf_get_current_pid_tgid();
        u32 __tgid = __pid_tgid >> 32;
        u32 __pid = __pid_tgid; // implicit cast to u32 for bottom half
        
        if (__tgid == 5577) { return 0; }
                
        
        
        if (!(1)) return 0;

        struct probe_sys_open_1_data_t __data = {0};
        __data.timestamp_ns = bpf_ktime_get_ns();
        __data.tgid = __tgid;
        __data.pid = __pid;
        bpf_get_current_comm(&__data.comm, sizeof(__data.comm));


        bpf_perf_event_output(ctx, bpf_pseudo_fd(1, 3), bpf_get_smp_processor_id(), &__data, sizeof(__data));
        return 0;
}
13 warnings generated.
; ModuleID = 'sscanf'

@fmt = private unnamed_addr constant [3 x i8] c"%i\00"
@fmt.1 = private unnamed_addr constant [5 x i8] c"0x%x\00"

; Function Attrs: nounwind
define i32 @reader0(i8* nocapture readonly %in, i32* %out) #0 {
entry:
  %0 = tail call i32 (i8*, i8*, ...) @sscanf(i8* %in, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @fmt, i64 0, i64 0), i32* %out)
  %not. = icmp ne i32 %0, 1
  %. = sext i1 %not. to i32
  ret i32 %.
}

; Function Attrs: nounwind
declare i32 @sscanf(i8* nocapture readonly, i8* nocapture readonly, ...) #0

; Function Attrs: nounwind
define i32 @writer0(i8* nocapture %out, i64 %len, i32* nocapture readonly %in) #0 {
entry:
  %0 = load i32, i32* %in, align 4
  %1 = tail call i32 (i8*, i64, i8*, ...) @snprintf(i8* %out, i64 %len, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @fmt.1, i64 0, i64 0), i32 %0)
  ret i32 %1
}

; Function Attrs: nounwind
declare i32 @snprintf(i8* nocapture, i64, i8* nocapture readonly, ...) #0

attributes #0 = { nounwind }
; ModuleID = '/virtual/main.c'
target datalayout = "e-m:e-p:64:64-i64:64-n32:64-S128"
target triple = "bpf-pc-linux"

module asm "\09.irp\09num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30"
module asm "\09.equ\09.L__reg_num_x\5Cnum, \5Cnum"
module asm "\09.endr"
module asm "\09.equ\09.L__reg_num_xzr, 31"
module asm ""
module asm "\09.macro\09mrs_s, rt, sreg"
module asm "\09.inst\090xd5200000|(\5Csreg)|(.L__reg_num_\5Crt)"
module asm "\09.endm"
module asm ""
module asm "\09.macro\09msr_s, sreg, rt"
module asm "\09.inst\090xd5000000|(\5Csreg)|(.L__reg_num_\5Crt)"
module asm "\09.endm"

%struct.probe_sys_open_1_events_table_t = type { i32, i32, i32 (i8*, i8*, i32)*, i32 (i8*, i32, i8*, i32)*, [0 x i32] }
%struct.pt_regs = type { %union.anon, i64, i64, i64, i64 }
%union.anon = type { %struct.user_pt_regs }
%struct.user_pt_regs = type { [31 x i64], i64, i64, i64 }
%struct.probe_sys_open_1_data_t = type { i64, i32, i32, [16 x i8] }

@_license = global [4 x i8] c"GPL\00", section "license", align 1
@_version = global i32 264203, section "version", align 4
@probe_sys_open_1_events = global %struct.probe_sys_open_1_events_table_t zeroinitializer, section "maps/perf_output", align 8
@llvm.used = appending global [9 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @_license, i32 0, i32 0), i8* bitcast (i32* @_version to i8*), i8* bitcast (i64 (i8*, i64, i64, i64)* @bpf_dext_pkt to i8*), i8* bitcast (void (i8*, i64, i64, i64, i64)* @bpf_dins_pkt to i8*), i8* bitcast (i32 (i8*, i64, i64, i64, i64)* @bpf_l3_csum_replace_ to i8*), i8* bitcast (i32 (i8*, i64, i64, i64, i64)* @bpf_l4_csum_replace_ to i8*), i8* bitcast (i32 (i64, i8*)* @bpf_map_delete_elem_ to i8*), i8* bitcast (i8* (i64, i8*)* @bpf_map_lookup_elem_ to i8*), i8* bitcast (i32 (i64, i8*, i8*, i64)* @bpf_map_update_elem_ to i8*)], section "llvm.metadata"

; Function Attrs: alwaysinline inlinehint norecurse nounwind readonly
define internal i64 @bpf_dext_pkt(i8* %pkt, i64 %off, i64 %bofs, i64 %bsz) #0 section "helpers" {
  %1 = icmp eq i64 %bofs, 0
  %2 = icmp eq i64 %bsz, 8
  %or.cond = and i1 %1, %2
  br i1 %or.cond, label %3, label %5

; <label>:3                                       ; preds = %0
  %4 = tail call i64 @llvm.bpf.load.byte(i8* %pkt, i64 %off)
  br label %70

; <label>:5                                       ; preds = %0
  %6 = add i64 %bsz, %bofs
  %7 = icmp ult i64 %6, 9
  br i1 %7, label %8, label %17

; <label>:8                                       ; preds = %5
  %9 = tail call i64 @llvm.bpf.load.byte(i8* %pkt, i64 %off)
  %10 = sub i64 8, %6
  %11 = lshr i64 %9, %10
  %12 = icmp ult i64 %bsz, 64
  %13 = shl i64 1, %bsz
  %14 = add i64 %13, -1
  %15 = select i1 %12, i64 %14, i64 -1
  %16 = and i64 %11, %15
  br label %70

; <label>:17                                      ; preds = %5
  %18 = icmp eq i64 %bsz, 16
  %or.cond1 = and i1 %1, %18
  br i1 %or.cond1, label %19, label %21

; <label>:19                                      ; preds = %17
  %20 = tail call i64 @llvm.bpf.load.half(i8* %pkt, i64 %off)
  br label %70

; <label>:21                                      ; preds = %17
  %22 = icmp ult i64 %6, 17
  br i1 %22, label %23, label %32

; <label>:23                                      ; preds = %21
  %24 = tail call i64 @llvm.bpf.load.half(i8* %pkt, i64 %off)
  %25 = sub i64 16, %6
  %26 = lshr i64 %24, %25
  %27 = icmp ult i64 %bsz, 64
  %28 = shl i64 1, %bsz
  %29 = add i64 %28, -1
  %30 = select i1 %27, i64 %29, i64 -1
  %31 = and i64 %26, %30
  br label %70

; <label>:32                                      ; preds = %21
  %33 = icmp eq i64 %bsz, 32
  %or.cond2 = and i1 %1, %33
  br i1 %or.cond2, label %34, label %36

; <label>:34                                      ; preds = %32
  %35 = tail call i64 @llvm.bpf.load.word(i8* %pkt, i64 %off)
  br label %70

; <label>:36                                      ; preds = %32
  %37 = icmp ult i64 %6, 33
  br i1 %37, label %38, label %47

; <label>:38                                      ; preds = %36
  %39 = tail call i64 @llvm.bpf.load.word(i8* %pkt, i64 %off)
  %40 = sub i64 32, %6
  %41 = lshr i64 %39, %40
  %42 = icmp ult i64 %bsz, 64
  %43 = shl i64 1, %bsz
  %44 = add i64 %43, -1
  %45 = select i1 %42, i64 %44, i64 -1
  %46 = and i64 %41, %45
  br label %70

; <label>:47                                      ; preds = %36
  %48 = icmp eq i64 %bsz, 64
  %or.cond3 = and i1 %1, %48
  br i1 %or.cond3, label %49, label %55

; <label>:49                                      ; preds = %47
  %50 = tail call i64 @llvm.bpf.load.word(i8* %pkt, i64 %off) #7
  %51 = shl i64 %50, 32
  %52 = add i64 %off, 4
  %53 = tail call i64 @llvm.bpf.load.word(i8* %pkt, i64 %52) #7
  %54 = or i64 %51, %53
  br label %70

; <label>:55                                      ; preds = %47
  %56 = icmp ult i64 %6, 65
  br i1 %56, label %57, label %70

; <label>:57                                      ; preds = %55
  %58 = tail call i64 @llvm.bpf.load.word(i8* %pkt, i64 %off) #7
  %59 = shl i64 %58, 32
  %60 = add i64 %off, 4
  %61 = tail call i64 @llvm.bpf.load.word(i8* %pkt, i64 %60) #7
  %62 = or i64 %59, %61
  %63 = sub i64 64, %6
  %64 = lshr i64 %62, %63
  %65 = icmp ult i64 %bsz, 64
  %66 = shl i64 1, %bsz
  %67 = add i64 %66, -1
  %68 = select i1 %65, i64 %67, i64 -1
  %69 = and i64 %64, %68
  br label %70

; <label>:70                                      ; preds = %55, %57, %49, %38, %34, %23, %19, %8, %3
  %.0 = phi i64 [ %4, %3 ], [ %16, %8 ], [ %20, %19 ], [ %31, %23 ], [ %35, %34 ], [ %46, %38 ], [ %54, %49 ], [ %69, %57 ], [ 0, %55 ]
  ret i64 %.0
}

; Function Attrs: alwaysinline nounwind readonly
declare i64 @llvm.bpf.load.byte(i8*, i64) #1

; Function Attrs: alwaysinline nounwind readonly
declare i64 @llvm.bpf.load.half(i8*, i64) #1

; Function Attrs: alwaysinline nounwind readonly
declare i64 @llvm.bpf.load.word(i8*, i64) #1

; Function Attrs: alwaysinline inlinehint nounwind
define internal void @bpf_dins_pkt(i8* %pkt, i64 %off, i64 %bofs, i64 %bsz, i64 %val) #2 section "helpers" {
  %1 = alloca i64, align 8
  %v = alloca i8, align 1
  %v1 = alloca i16, align 2
  %v2 = alloca i16, align 2
  %v3 = alloca i32, align 4
  %v4 = alloca i32, align 4
  %v5 = alloca i64, align 8
  %v6 = alloca i64, align 8
  store i64 %val, i64* %1, align 8
  %2 = icmp eq i64 %bofs, 0
  %3 = icmp eq i64 %bsz, 8
  %or.cond = and i1 %2, %3
  br i1 %or.cond, label %4, label %7

; <label>:4                                       ; preds = %0
  %5 = bitcast i64* %1 to i8*
  %6 = call i32 inttoptr (i64 9 to i32 (i8*, i64, i8*, i64, i64)*)(i8* %pkt, i64 %off, i8* %5, i64 1, i64 0) #7
  br label %114

; <label>:7                                       ; preds = %0
  %8 = add i64 %bsz, %bofs
  %9 = icmp ult i64 %8, 9
  br i1 %9, label %10, label %25

; <label>:10                                      ; preds = %7
  %11 = tail call i64 @llvm.bpf.load.byte(i8* %pkt, i64 %off)
  %12 = icmp ult i64 %bsz, 64
  %13 = shl i64 1, %bsz
  %14 = add i64 %13, -1
  %15 = select i1 %12, i64 %14, i64 -1
  %16 = sub i64 8, %8
  %17 = shl i64 %15, %16
  %18 = xor i64 %17, 255
  %19 = and i64 %11, %18
  %20 = and i64 %15, %val
  %21 = shl i64 %20, %16
  %22 = or i64 %21, %19
  %23 = trunc i64 %22 to i8
  store i8 %23, i8* %v, align 1
  %24 = call i32 inttoptr (i64 9 to i32 (i8*, i64, i8*, i64, i64)*)(i8* %pkt, i64 %off, i8* nonnull %v, i64 1, i64 0) #7
  br label %114

; <label>:25                                      ; preds = %7
  %26 = icmp eq i64 %bsz, 16
  %or.cond7 = and i1 %2, %26
  br i1 %or.cond7, label %27, label %36

; <label>:27                                      ; preds = %25
  %28 = trunc i64 %val to i32
  %29 = shl i32 %28, 8
  %30 = lshr i32 %28, 8
  %31 = and i32 %30, 255
  %32 = or i32 %31, %29
  %33 = trunc i32 %32 to i16
  store i16 %33, i16* %v1, align 2
  %34 = bitcast i16* %v1 to i8*
  %35 = call i32 inttoptr (i64 9 to i32 (i8*, i64, i8*, i64, i64)*)(i8* %pkt, i64 %off, i8* %34, i64 2, i64 0) #7
  br label %114

; <label>:36                                      ; preds = %25
  %37 = icmp ult i64 %8, 17
  br i1 %37, label %38, label %60

; <label>:38                                      ; preds = %36
  %39 = tail call i64 @llvm.bpf.load.half(i8* %pkt, i64 %off)
  %40 = icmp ult i64 %bsz, 64
  %41 = shl i64 1, %bsz
  %42 = add i64 %41, -1
  %43 = select i1 %40, i64 %42, i64 -1
  %44 = sub i64 16, %8
  %45 = shl i64 %43, %44
  %46 = xor i64 %45, 65535
  %47 = and i64 %39, %46
  %48 = and i64 %43, %val
  %49 = shl i64 %48, %44
  %50 = and i64 %47, 65535
  %51 = or i64 %49, %50
  %52 = trunc i64 %51 to i32
  %53 = shl i32 %52, 8
  %54 = lshr i32 %52, 8
  %55 = and i32 %54, 255
  %56 = or i32 %55, %53
  %57 = trunc i32 %56 to i16
  store i16 %57, i16* %v2, align 2
  %58 = bitcast i16* %v2 to i8*
  %59 = call i32 inttoptr (i64 9 to i32 (i8*, i64, i8*, i64, i64)*)(i8* %pkt, i64 %off, i8* %58, i64 2, i64 0) #7
  br label %114

; <label>:60                                      ; preds = %36
  %61 = icmp eq i64 %bsz, 32
  %or.cond8 = and i1 %2, %61
  br i1 %or.cond8, label %62, label %67

; <label>:62                                      ; preds = %60
  %63 = trunc i64 %val to i32
  %64 = tail call i32 @llvm.bswap.i32(i32 %63) #7
  store i32 %64, i32* %v3, align 4
  %65 = bitcast i32* %v3 to i8*
  %66 = call i32 inttoptr (i64 9 to i32 (i8*, i64, i8*, i64, i64)*)(i8* %pkt, i64 %off, i8* %65, i64 4, i64 0) #7
  br label %114

; <label>:67                                      ; preds = %60
  %68 = icmp ult i64 %8, 33
  br i1 %68, label %69, label %86

; <label>:69                                      ; preds = %67
  %70 = tail call i64 @llvm.bpf.load.word(i8* %pkt, i64 %off)
  %71 = icmp ult i64 %bsz, 64
  %72 = shl i64 1, %bsz
  %73 = add i64 %72, -1
  %74 = select i1 %71, i64 %73, i64 -1
  %75 = sub i64 32, %8
  %76 = shl i64 %74, %75
  %77 = xor i64 %76, 4294967295
  %78 = and i64 %70, %77
  %79 = and i64 %74, %val
  %80 = shl i64 %79, %75
  %81 = or i64 %80, %78
  %82 = trunc i64 %81 to i32
  %83 = tail call i32 @llvm.bswap.i32(i32 %82) #7
  store i32 %83, i32* %v4, align 4
  %84 = bitcast i32* %v4 to i8*
  %85 = call i32 inttoptr (i64 9 to i32 (i8*, i64, i8*, i64, i64)*)(i8* %pkt, i64 %off, i8* %84, i64 4, i64 0) #7
  br label %114

; <label>:86                                      ; preds = %67
  %87 = icmp eq i64 %bsz, 64
  %or.cond9 = and i1 %2, %87
  br i1 %or.cond9, label %88, label %92

; <label>:88                                      ; preds = %86
  %89 = tail call i64 @llvm.bswap.i64(i64 %val) #7
  store i64 %89, i64* %v5, align 8
  %90 = bitcast i64* %v5 to i8*
  %91 = call i32 inttoptr (i64 9 to i32 (i8*, i64, i8*, i64, i64)*)(i8* %pkt, i64 %off, i8* %90, i64 8, i64 0) #7
  br label %114

; <label>:92                                      ; preds = %86
  %93 = icmp ult i64 %8, 65
  br i1 %93, label %94, label %114

; <label>:94                                      ; preds = %92
  %95 = tail call i64 @llvm.bpf.load.word(i8* %pkt, i64 %off) #7
  %96 = shl i64 %95, 32
  %97 = add i64 %off, 4
  %98 = tail call i64 @llvm.bpf.load.word(i8* %pkt, i64 %97) #7
  %99 = or i64 %96, %98
  %100 = icmp ult i64 %bsz, 64
  %101 = shl i64 1, %bsz
  %102 = add i64 %101, -1
  %103 = select i1 %100, i64 %102, i64 -1
  %104 = sub i64 64, %8
  %105 = shl i64 %103, %104
  %106 = xor i64 %105, -1
  %107 = and i64 %99, %106
  %108 = and i64 %103, %val
  %109 = shl i64 %108, %104
  %110 = or i64 %109, %107
  %111 = tail call i64 @llvm.bswap.i64(i64 %110) #7
  store i64 %111, i64* %v6, align 8
  %112 = bitcast i64* %v6 to i8*
  %113 = call i32 inttoptr (i64 9 to i32 (i8*, i64, i8*, i64, i64)*)(i8* %pkt, i64 %off, i8* %112, i64 8, i64 0) #7
  br label %114

; <label>:114                                     ; preds = %10, %38, %69, %92, %94, %88, %62, %27, %4
  ret void
}

; Function Attrs: alwaysinline inlinehint nounwind
define internal i8* @bpf_map_lookup_elem_(i64 %map, i8* %key) #2 section "helpers" {
  %1 = inttoptr i64 %map to i8*
  %2 = tail call i8* inttoptr (i64 1 to i8* (i8*, i8*)*)(i8* %1, i8* %key) #7
  ret i8* %2
}

; Function Attrs: alwaysinline inlinehint nounwind
define internal i32 @bpf_map_update_elem_(i64 %map, i8* %key, i8* %value, i64 %flags) #2 section "helpers" {
  %1 = inttoptr i64 %map to i8*
  %2 = tail call i32 inttoptr (i64 2 to i32 (i8*, i8*, i8*, i64)*)(i8* %1, i8* %key, i8* %value, i64 %flags) #7
  ret i32 %2
}

; Function Attrs: alwaysinline inlinehint nounwind
define internal i32 @bpf_map_delete_elem_(i64 %map, i8* %key) #2 section "helpers" {
  %1 = inttoptr i64 %map to i8*
  %2 = tail call i32 inttoptr (i64 3 to i32 (i8*, i8*)*)(i8* %1, i8* %key) #7
  ret i32 %2
}

; Function Attrs: alwaysinline inlinehint nounwind
define internal i32 @bpf_l3_csum_replace_(i8* %ctx, i64 %off, i64 %from, i64 %to, i64 %flags) #2 section "helpers" {
  %1 = and i64 %flags, 15
  switch i64 %1, label %30 [
    i64 2, label %2
    i64 4, label %18
    i64 8, label %26
  ]

; <label>:2                                       ; preds = %0
  %3 = trunc i64 %from to i32
  %4 = shl i32 %3, 8
  %5 = lshr i32 %3, 8
  %6 = and i32 %5, 255
  %7 = or i32 %6, %4
  %8 = zext i32 %7 to i64
  %9 = and i64 %8, 65535
  %10 = trunc i64 %to to i32
  %11 = shl i32 %10, 8
  %12 = lshr i32 %10, 8
  %13 = and i32 %12, 255
  %14 = or i32 %13, %11
  %15 = zext i32 %14 to i64
  %16 = and i64 %15, 65535
  %17 = tail call i32 inttoptr (i64 10 to i32 (i8*, i64, i64, i64, i64)*)(i8* %ctx, i64 %off, i64 %9, i64 %16, i64 %flags) #7
  br label %32

; <label>:18                                      ; preds = %0
  %19 = trunc i64 %from to i32
  %20 = tail call i32 @llvm.bswap.i32(i32 %19) #7
  %21 = zext i32 %20 to i64
  %22 = trunc i64 %to to i32
  %23 = tail call i32 @llvm.bswap.i32(i32 %22) #7
  %24 = zext i32 %23 to i64
  %25 = tail call i32 inttoptr (i64 10 to i32 (i8*, i64, i64, i64, i64)*)(i8* %ctx, i64 %off, i64 %21, i64 %24, i64 %flags) #7
  br label %32

; <label>:26                                      ; preds = %0
  %27 = tail call i64 @llvm.bswap.i64(i64 %from) #7
  %28 = tail call i64 @llvm.bswap.i64(i64 %to) #7
  %29 = tail call i32 inttoptr (i64 10 to i32 (i8*, i64, i64, i64, i64)*)(i8* %ctx, i64 %off, i64 %27, i64 %28, i64 %flags) #7
  br label %32

; <label>:30                                      ; preds = %0
  %31 = tail call i32 inttoptr (i64 10 to i32 (i8*, i64, i64, i64, i64)*)(i8* %ctx, i64 %off, i64 %from, i64 %to, i64 %flags) #7
  br label %32

; <label>:32                                      ; preds = %30, %26, %18, %2
  %.0 = phi i32 [ %31, %30 ], [ %29, %26 ], [ %25, %18 ], [ %17, %2 ]
  ret i32 %.0
}

; Function Attrs: alwaysinline inlinehint nounwind
define internal i32 @bpf_l4_csum_replace_(i8* %ctx, i64 %off, i64 %from, i64 %to, i64 %flags) #2 section "helpers" {
  %1 = and i64 %flags, 15
  switch i64 %1, label %30 [
    i64 2, label %2
    i64 4, label %18
    i64 8, label %26
  ]

; <label>:2                                       ; preds = %0
  %3 = trunc i64 %from to i32
  %4 = shl i32 %3, 8
  %5 = lshr i32 %3, 8
  %6 = and i32 %5, 255
  %7 = or i32 %6, %4
  %8 = zext i32 %7 to i64
  %9 = and i64 %8, 65535
  %10 = trunc i64 %to to i32
  %11 = shl i32 %10, 8
  %12 = lshr i32 %10, 8
  %13 = and i32 %12, 255
  %14 = or i32 %13, %11
  %15 = zext i32 %14 to i64
  %16 = and i64 %15, 65535
  %17 = tail call i32 inttoptr (i64 11 to i32 (i8*, i64, i64, i64, i64)*)(i8* %ctx, i64 %off, i64 %9, i64 %16, i64 %flags) #7
  br label %32

; <label>:18                                      ; preds = %0
  %19 = trunc i64 %from to i32
  %20 = tail call i32 @llvm.bswap.i32(i32 %19) #7
  %21 = zext i32 %20 to i64
  %22 = trunc i64 %to to i32
  %23 = tail call i32 @llvm.bswap.i32(i32 %22) #7
  %24 = zext i32 %23 to i64
  %25 = tail call i32 inttoptr (i64 11 to i32 (i8*, i64, i64, i64, i64)*)(i8* %ctx, i64 %off, i64 %21, i64 %24, i64 %flags) #7
  br label %32

; <label>:26                                      ; preds = %0
  %27 = tail call i64 @llvm.bswap.i64(i64 %from) #7
  %28 = tail call i64 @llvm.bswap.i64(i64 %to) #7
  %29 = tail call i32 inttoptr (i64 11 to i32 (i8*, i64, i64, i64, i64)*)(i8* %ctx, i64 %off, i64 %27, i64 %28, i64 %flags) #7
  br label %32

; <label>:30                                      ; preds = %0
  %31 = tail call i32 inttoptr (i64 11 to i32 (i8*, i64, i64, i64, i64)*)(i8* %ctx, i64 %off, i64 %from, i64 %to, i64 %flags) #7
  br label %32

; <label>:32                                      ; preds = %30, %26, %18, %2
  %.0 = phi i32 [ %31, %30 ], [ %29, %26 ], [ %25, %18 ], [ %17, %2 ]
  ret i32 %.0
}

; Function Attrs: alwaysinline nounwind
define i32 @probe_sys_open_1(%struct.pt_regs* %ctx) #3 section ".bpf.fn.probe_sys_open_1" {
  %__data = alloca %struct.probe_sys_open_1_data_t, align 8
  %1 = tail call i64 inttoptr (i64 14 to i64 ()*)() #7
  %2 = lshr i64 %1, 32
  %3 = trunc i64 %2 to i32
  %4 = icmp eq i32 %3, 5577
  br i1 %4, label %19, label %5

; <label>:5                                       ; preds = %0
  %6 = trunc i64 %1 to i32
  %7 = bitcast %struct.probe_sys_open_1_data_t* %__data to i8*
  call void @llvm.memset.p0i8.i64(i8* %7, i8 0, i64 32, i32 8, i1 false)
  %8 = tail call i64 inttoptr (i64 5 to i64 ()*)() #7
  %9 = getelementptr inbounds %struct.probe_sys_open_1_data_t, %struct.probe_sys_open_1_data_t* %__data, i64 0, i32 0
  store i64 %8, i64* %9, align 8
  %10 = getelementptr inbounds %struct.probe_sys_open_1_data_t, %struct.probe_sys_open_1_data_t* %__data, i64 0, i32 1
  store i32 %3, i32* %10, align 8
  %11 = getelementptr inbounds %struct.probe_sys_open_1_data_t, %struct.probe_sys_open_1_data_t* %__data, i64 0, i32 2
  store i32 %6, i32* %11, align 4
  %12 = getelementptr inbounds %struct.probe_sys_open_1_data_t, %struct.probe_sys_open_1_data_t* %__data, i64 0, i32 3, i64 0
  %13 = call i32 inttoptr (i64 16 to i32 (i8*, i32)*)(i8* %12, i32 16) #7
  %14 = bitcast %struct.pt_regs* %ctx to i8*
  %15 = call i64 @llvm.bpf.pseudo(i64 1, i64 3)
  %16 = inttoptr i64 %15 to i8*
  %17 = call i64 inttoptr (i64 8 to i64 ()*)() #7
  %18 = call i32 inttoptr (i64 25 to i32 (i8*, i8*, i64, i8*, i32)*)(i8* %14, i8* %16, i64 %17, i8* %7, i32 32) #7
  br label %19

; <label>:19                                      ; preds = %0, %5
  ret i32 0
}

; Function Attrs: alwaysinline argmemonly nounwind
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #4

; Function Attrs: alwaysinline nounwind
declare i64 @llvm.bpf.pseudo(i64, i64) #5

; Function Attrs: alwaysinline nounwind readnone
declare i32 @llvm.bswap.i32(i32) #6

; Function Attrs: alwaysinline nounwind readnone
declare i64 @llvm.bswap.i64(i64) #6

attributes #0 = { alwaysinline inlinehint norecurse nounwind readonly "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { alwaysinline nounwind readonly }
attributes #2 = { alwaysinline inlinehint nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #3 = { alwaysinline nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #4 = { alwaysinline argmemonly nounwind }
attributes #5 = { alwaysinline nounwind }
attributes #6 = { alwaysinline nounwind readnone }
attributes #7 = { nounwind }

!llvm.ident = !{!0}

!0 = !{!"clang version 3.8.0-2ubuntu4 (tags/RELEASE_380/final)"}
LLVM ERROR: Inline asm not supported by this streamer because we don't have an asm parser for this target

_______________________________________________
iovisor-dev mailing list
[email protected]
https://lists.iovisor.org/mailman/listinfo/iovisor-dev

Reply via email to