augmented_raw_syscalls.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
 *
 * Test it with:
 *
 * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
 *
 * This exactly matches what is marshalled into the raw_syscalls:sys_enter
 * payload expected by the 'perf trace' beautifiers.
 *
 * For now it just uses the existing tracepoint augmentation code in 'perf
 * trace', in the next csets we'll hook up these with the sys_enter/sys_exit
 * code that will combine entry/exit in a strace-like way.
 */

#include <stdio.h>
#include <linux/socket.h>

/* bpf-output associated map */
struct bpf_map SEC("maps") __augmented_syscalls__ = {
	.type	     = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size    = sizeof(int),
	.value_size  = sizeof(u32),
	.max_entries = __NR_CPUS__,
};

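/*
 * Mirrors the raw_syscalls:sys_enter tracepoint payload: the common
 * tracepoint fields collapsed into one u64, then the syscall number and its
 * six arguments.
 */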
struct syscall_enter_args {
	unsigned long long common_tp_fields;
	long		   syscall_nr;
	unsigned long	   args[6];
};

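/* Likewise for raw_syscalls:sys_exit: syscall number and return value. */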
struct syscall_exit_args {
	unsigned long long common_tp_fields;
	long		   syscall_nr;
	long		   ret;
};

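/*
 * What gets appended after the sys_enter payload: the number of bytes
 * returned by probe_read_str() and the (possibly truncated) filename bytes.
 */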
struct augmented_filename {
	unsigned int	size;
	int		reserved;
	char		value[256];
};

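/* x86_64 syscall numbers for the two syscalls augmented so far */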
#define SYS_OPEN 2
#define SYS_OPENAT 257

SEC("raw_syscalls:sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
	struct {
		struct syscall_enter_args args;
		struct augmented_filename filename;
	} augmented_args;
	unsigned int len = sizeof(augmented_args);
	const void *filename_arg = NULL;

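	/* Copy the fixed-size tracepoint payload into our buffer first */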
	probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
	/*
	 * Yonghong and Edward Cree sayz:
	 *
	 * https://www.spinics.net/lists/netdev/msg531645.html
	 *
	 * >> R0=inv(id=0) R1=inv2 R6=ctx(id=0,off=0,imm=0) R7=inv64 R10=fp0,call_-1
	 * >> 10: (bf) r1 = r6
	 * >> 11: (07) r1 += 16
	 * >> 12: (05) goto pc+2
	 * >> 15: (79) r3 = *(u64 *)(r1 +0)
	 * >> dereference of modified ctx ptr R1 off=16 disallowed
	 * > Aha, we at least got a different error message this time.
	 * > And indeed llvm has done that optimisation, rather than the more obvious
	 * > 11: r3 = *(u64 *)(r1 +16)
	 * > because it wants to have lots of reads share a single insn. You may be able
	 * > to defeat that optimisation by adding compiler barriers, idk. Maybe someone
	 * > with llvm knowledge can figure out how to stop it (ideally, llvm would know
	 * > when it's generating for bpf backend and not do that). -O0? ¯\_(ツ)_/¯
	 *
	 * The optimization mostly looks like this:
	 *
	 *	br1:
	 *	...
	 *	r1 += 16
	 *	goto merge
	 *	br2:
	 *	...
	 *	r1 += 20
	 *	goto merge
	 *	merge:
	 *	*(u64 *)(r1 + 0)
	 *
	 * The compiler tries to merge common loads. There is no easy way to
	 * stop this compiler optimization without turning off a lot of other
	 * optimizations. The easiest way is to add barriers:
	 *
	 *	__asm__ __volatile__("": : :"memory")
	 *
	 * after the ctx memory access to prevent their downstream merging.
	 */
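	/*
	 * Only open's first and openat's second argument (the filename
	 * pointer) are augmented for now; the barrier after the SYS_OPEN case
	 * is the workaround described above, keeping LLVM from merging the
	 * two ctx loads into a single offset-adjusted insn that the verifier
	 * would reject.
	 */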
	switch (augmented_args.args.syscall_nr) {
	case SYS_OPEN:	 filename_arg = (const void *)args->args[0];
			 __asm__ __volatile__("": : :"memory");
			 break;
	case SYS_OPENAT: filename_arg = (const void *)args->args[1];
			 break;
	}

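	/*
	 * Trim 'len' down to what was actually copied when the filename was
	 * shorter than the buffer; the mask keeps the value bounded, which
	 * helps the BPF verifier accept the variable-sized output below.
	 */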
	if (filename_arg != NULL) {
		augmented_args.filename.reserved = 0;
		augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
							      sizeof(augmented_args.filename.value),
							      filename_arg);
		if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
			len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
			len &= sizeof(augmented_args.filename.value) - 1;
		}
	} else {
		len = sizeof(augmented_args.args);
	}

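	/* Emit the augmented payload to userspace via the bpf-output event map */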
	perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
	return 0;
}

SEC("raw_syscalls:sys_exit")
int sys_exit(struct syscall_exit_args *args)
{
	return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */
}

license(GPL);