/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Traceprobe fetch helper inlines
 */
  5. static nokprobe_inline void
  6. fetch_store_raw(unsigned long val, struct fetch_insn *code, void *buf)
  7. {
  8. switch (code->size) {
  9. case 1:
  10. *(u8 *)buf = (u8)val;
  11. break;
  12. case 2:
  13. *(u16 *)buf = (u16)val;
  14. break;
  15. case 4:
  16. *(u32 *)buf = (u32)val;
  17. break;
  18. case 8:
  19. //TBD: 32bit signed
  20. *(u64 *)buf = (u64)val;
  21. break;
  22. default:
  23. *(unsigned long *)buf = val;
  24. }
  25. }
  26. static nokprobe_inline void
  27. fetch_apply_bitfield(struct fetch_insn *code, void *buf)
  28. {
  29. switch (code->basesize) {
  30. case 1:
  31. *(u8 *)buf <<= code->lshift;
  32. *(u8 *)buf >>= code->rshift;
  33. break;
  34. case 2:
  35. *(u16 *)buf <<= code->lshift;
  36. *(u16 *)buf >>= code->rshift;
  37. break;
  38. case 4:
  39. *(u32 *)buf <<= code->lshift;
  40. *(u32 *)buf >>= code->rshift;
  41. break;
  42. case 8:
  43. *(u64 *)buf <<= code->lshift;
  44. *(u64 *)buf >>= code->rshift;
  45. break;
  46. }
  47. }
/*
 * These functions must be defined for each callsite.
 * Return consumed dynamic data size (>= 0), or error (< 0).
 * If dest is NULL, don't store result and return required dynamic data size.
 */
/* First-stage fetch: decode one fetch_insn against the probed record. */
static int
process_fetch_insn(struct fetch_insn *code, void *rec,
		   void *dest, void *base);
/* Length (incl. NUL) of a kernel string at @addr; <= 0 on error. */
static nokprobe_inline int fetch_store_strlen(unsigned long addr);
/* Copy a kernel string at @addr into the dynamic area at @dest/@base. */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base);
/* As fetch_store_strlen(), but @addr is a user-space pointer. */
static nokprobe_inline int fetch_store_strlen_user(unsigned long addr);
/* As fetch_store_string(), but @addr is a user-space pointer. */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base);
/* Safe kernel-memory read of @size bytes from @src into @dest. */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size);
/* Safe user-memory read of @size bytes from @src into @dest. */
static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size);
  66. static nokprobe_inline int
  67. fetch_store_symstrlen(unsigned long addr)
  68. {
  69. char namebuf[KSYM_SYMBOL_LEN];
  70. int ret;
  71. ret = sprint_symbol(namebuf, addr);
  72. if (ret < 0)
  73. return 0;
  74. return ret + 1;
  75. }
  76. /*
  77. * Fetch a null-terminated symbol string + offset. Caller MUST set *(u32 *)buf
  78. * with max length and relative data location.
  79. */
  80. static nokprobe_inline int
  81. fetch_store_symstring(unsigned long addr, void *dest, void *base)
  82. {
  83. int maxlen = get_loc_len(*(u32 *)dest);
  84. void *__dest;
  85. if (unlikely(!maxlen))
  86. return -ENOMEM;
  87. __dest = get_loc_data(dest, base);
  88. return sprint_symbol(__dest, addr);
  89. }
/* From the 2nd stage, routine is same */
/*
 * Common tail of process_fetch_insn(): dereference (stage 2), store
 * (stage 3), modify (stage 4), then optionally loop over array elements.
 * Returns consumed dynamic data size (>= 0) or a negative error; with
 * @dest == NULL it only sizes the required dynamic data.
 */
static nokprobe_inline int
process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val,
			  void *dest, void *base)
{
	struct fetch_insn *s3 = NULL;	/* restart point for array elements (stage 3) */
	int total = 0, ret = 0, i = 0;	/* i = array elements processed so far */
	u32 loc = 0;			/* saved data_loc of the current string slot */
	unsigned long lval = val;	/* value before the last dereference chain step */

stage2:
	/* 2nd stage: dereference memory if needed */
	do {
		if (code->op == FETCH_OP_DEREF) {
			/* remember the pointer so array mode can step to the next slot */
			lval = val;
			ret = probe_mem_read(&val, (void *)val + code->offset,
					     sizeof(val));
		} else if (code->op == FETCH_OP_UDEREF) {
			lval = val;
			ret = probe_mem_read_user(&val,
					(void *)val + code->offset, sizeof(val));
		} else
			break;
		if (ret)
			return ret;
		code++;
	} while (1);

	s3 = code;
stage3:
	/* 3rd stage: store value to buffer */
	if (unlikely(!dest)) {
		/* Sizing pass: only string-like ops consume dynamic data. */
		switch (code->op) {
		case FETCH_OP_ST_STRING:
			ret = fetch_store_strlen(val + code->offset);
			code++;
			goto array;
		case FETCH_OP_ST_USTRING:
			ret = fetch_store_strlen_user(val + code->offset);
			code++;
			goto array;
		case FETCH_OP_ST_SYMSTR:
			ret = fetch_store_symstrlen(val + code->offset);
			code++;
			goto array;
		default:
			return -EILSEQ;
		}
	}

	switch (code->op) {
	case FETCH_OP_ST_RAW:
		fetch_store_raw(val, code, dest);
		break;
	case FETCH_OP_ST_MEM:
		probe_mem_read(dest, (void *)val + code->offset, code->size);
		break;
	case FETCH_OP_ST_UMEM:
		probe_mem_read_user(dest, (void *)val + code->offset, code->size);
		break;
	case FETCH_OP_ST_STRING:
		/* save data_loc so array mode can rewrite it per element */
		loc = *(u32 *)dest;
		ret = fetch_store_string(val + code->offset, dest, base);
		break;
	case FETCH_OP_ST_USTRING:
		loc = *(u32 *)dest;
		ret = fetch_store_string_user(val + code->offset, dest, base);
		break;
	case FETCH_OP_ST_SYMSTR:
		loc = *(u32 *)dest;
		ret = fetch_store_symstring(val + code->offset, dest, base);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 4th stage: modify stored value if needed */
	if (code->op == FETCH_OP_MOD_BF) {
		fetch_apply_bitfield(code, dest);
		code++;
	}

array:
	/* the last stage: Loop on array */
	if (code->op == FETCH_OP_LP_ARRAY) {
		if (ret < 0)
			ret = 0;
		total += ret;
		if (++i < code->param) {
			code = s3;
			/*
			 * NOTE(review): FETCH_OP_ST_SYMSTR takes the fixed-size
			 * path here — presumably symstr arrays are rejected at
			 * parse time; confirm against the argument parser.
			 */
			if (s3->op != FETCH_OP_ST_STRING &&
			    s3->op != FETCH_OP_ST_USTRING) {
				/* fixed-size element: advance within the buffer */
				dest += s3->size;
				val += s3->size;
				goto stage3;
			}
			/* string element: re-run the last deref on the next slot */
			code--;
			val = lval + sizeof(char *);
			if (dest) {
				dest += sizeof(u32);
				*(u32 *)dest = update_data_loc(loc, ret);
			}
			goto stage2;
		}
		code++;
		ret = total;
	}

	return code->op == FETCH_OP_END ? ret : -EILSEQ;
}
  195. /* Sum up total data length for dynamic arrays (strings) */
  196. static nokprobe_inline int
  197. __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
  198. {
  199. struct probe_arg *arg;
  200. int i, len, ret = 0;
  201. for (i = 0; i < tp->nr_args; i++) {
  202. arg = tp->args + i;
  203. if (unlikely(arg->dynamic)) {
  204. len = process_fetch_insn(arg->code, regs, NULL, NULL);
  205. if (len > 0)
  206. ret += len;
  207. }
  208. }
  209. return ret;
  210. }
  211. /* Store the value of each argument */
  212. static nokprobe_inline void
  213. store_trace_args(void *data, struct trace_probe *tp, void *rec,
  214. int header_size, int maxlen)
  215. {
  216. struct probe_arg *arg;
  217. void *base = data - header_size;
  218. void *dyndata = data + tp->size;
  219. u32 *dl; /* Data location */
  220. int ret, i;
  221. for (i = 0; i < tp->nr_args; i++) {
  222. arg = tp->args + i;
  223. dl = data + arg->offset;
  224. /* Point the dynamic data area if needed */
  225. if (unlikely(arg->dynamic))
  226. *dl = make_data_loc(maxlen, dyndata - base);
  227. ret = process_fetch_insn(arg->code, rec, dl, base);
  228. if (arg->dynamic && likely(ret > 0)) {
  229. dyndata += ret;
  230. maxlen -= ret;
  231. }
  232. }
  233. }
  234. static inline int
  235. print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args,
  236. u8 *data, void *field)
  237. {
  238. void *p;
  239. int i, j;
  240. for (i = 0; i < nr_args; i++) {
  241. struct probe_arg *a = args + i;
  242. trace_seq_printf(s, " %s=", a->name);
  243. if (likely(!a->count)) {
  244. if (!a->type->print(s, data + a->offset, field))
  245. return -ENOMEM;
  246. continue;
  247. }
  248. trace_seq_putc(s, '{');
  249. p = data + a->offset;
  250. for (j = 0; j < a->count; j++) {
  251. if (!a->type->print(s, p, field))
  252. return -ENOMEM;
  253. trace_seq_putc(s, j == a->count - 1 ? '}' : ',');
  254. p += a->type->size;
  255. }
  256. }
  257. return 0;
  258. }