/*
 * Monitoring code for network dropped packet alerts
 *
 * Copyright (C) 2018 SAMSUNG Electronics, Co,LTD
 */
#include <net/ip.h>
#include <net/tcp.h>
#if defined(CONFIG_ANDROID_VENDOR_HOOKS)
#include <trace/hooks/net.h>
#endif
#include <trace/events/skb.h>
#include <net/dropdump.h>
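
/*
 * Runtime debug control: debug_drd is a bitmask consulted by drd_dbg().
 * The individual DEBUG_* flag values are presumably defined in
 * <net/dropdump.h>; the mask can be changed at runtime through the
 * "debug_drd" sysfs attribute created below.
 */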
int debug_drd = 0;

#define drd_info(fmt, ...) pr_info("drd: %s: " pr_fmt(fmt), __func__, ##__VA_ARGS__)
#define drd_dbg(flag, ...)					\
do {								\
	if (unlikely(debug_drd & flag))				\
		drd_info(__VA_ARGS__);				\
} while (0)

DEFINE_RATELIMIT_STATE(drd_ratelimit_print, 1 * HZ, 10);
#define drd_limit(...)						\
do {								\
	if (__ratelimit(&drd_ratelimit_print))			\
		drd_info(__VA_ARGS__);				\
} while (0)

DEFINE_RATELIMIT_STATE(drd_ratelimit_pkt, 1 * HZ, 32);

struct list_head ptype_log __read_mostly;
EXPORT_SYMBOL_GPL(ptype_log);

int support_dropdump;
EXPORT_SYMBOL_GPL(support_dropdump);

extern struct list_head ptype_all;

struct st_item hmap[DRD_HSIZE];
spinlock_t hlock;
u64 hmap_count;
u64 hdup_count;
uint hmax_depth;
u16 skip_count;
u64 dropped_count;

#ifdef DRD_WQ
struct _drd_worker drd_worker;
unsigned int budget_default = BUDGET_DEFAULT;
unsigned int budget_limit;
#define BUDGET_MAX	(budget_default << 2)
#define LIST_MAX	(BUDGET_MAX << 2)
#endif

void init_st_item(struct st_item *item)
{
	INIT_LIST_HEAD(&item->list);
	item->p = 0;
	item->matched = 0;
	item->st[0] = '\0';	/* st holds a symbol string; start empty */
}
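
/*
 * Hash an instruction address into a bucket index: sum the bytes of the
 * low 32 bits of the address, then reduce modulo DRD_HSIZE.
 */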
int get_hkey(u64 *hvalue)
{
	u64 key = 0;
	u64 src = *hvalue & 0x00000000ffffffff;

	while (src) {
		key += src & 0x000000ff;
		src >>= 8;
	}
	key %= DRD_HSIZE;

	return (int)key;
}
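
/*
 * Look up a cached symbol string for the given address. Called with
 * hlock held. On a hit the entry's match count is bumped and, for
 * entries deep in the chain, the entry is moved toward the front so
 * frequently hit symbols are found faster next time.
 */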
char *get_hmap(u64 *hvalue)
{
	int hkey = get_hkey(hvalue);
	struct st_item *lookup = &hmap[hkey];
	uint depth = 1;

	do {
		drd_dbg(DEBUG_HASH, "hvalue search[%d]: <%pK|%pK|%pK> p:[%llx], hvalue:{%llx}\n",
			hkey, lookup, lookup->list.next, &hmap[hkey], lookup->p, *hvalue);
		if (lookup->p == *hvalue) {
			drd_dbg(DEBUG_HASH, "hvalue found: '%s'\n", lookup->st);
			if (lookup->matched < 0xffffffffffffffff)
				lookup->matched++;
			if (depth >= 3 && lookup->matched > ((struct st_item *)hmap[hkey].list.next)->matched) {
				/* simply reorder the list by matched count, except the hmap array head */
				list_del(&lookup->list);
				__list_add(&lookup->list, &hmap[hkey].list, hmap[hkey].list.next);
			}
			return lookup->st;
		}
		lookup = (struct st_item *)lookup->list.next;
		if (hmax_depth < ++depth)
			hmax_depth = depth;
	} while (lookup != &hmap[hkey]);

	drd_dbg(DEBUG_HASH, "hvalue not found\n");
	return NULL;
}
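
/*
 * Insert a new symbol entry for the given address. Entered with hlock
 * held (taken by symbol_lookup()) and drops it before the potentially
 * slow %pS symbol resolution. The bucket head is used in place while
 * still empty; otherwise a new item is chained onto the bucket.
 */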
char *set_hmap(u64 *hvalue)
{
	int hkey = get_hkey(hvalue);
	struct st_item *newItem = NULL;
	bool first_hit = false;

	drd_dbg(DEBUG_HASH, "hvalue {%d}: <%llx> %llx\n", hkey, *hvalue, hmap[hkey].p);
	if (hmap[hkey].p == 0) {
		newItem = &hmap[hkey];
		first_hit = true;
	} else {
		newItem = kmalloc(sizeof(struct st_item), GFP_ATOMIC);
		if (newItem == NULL) {
			drd_dbg(DEBUG_HASH, "fail to alloc\n");
			spin_unlock_bh(&hlock);
			return NULL;
		}
		init_st_item(newItem);
		list_add_tail(&newItem->list, &hmap[hkey].list);
		hdup_count++;
	}

	newItem->p = *hvalue;
	hmap_count++;
	spin_unlock_bh(&hlock);

	snprintf(newItem->st, ST_SIZE, "%pS", (void *)*hvalue);
	drd_dbg(DEBUG_HASH, "{%d:%d} <%pK> '%s'\n", hkey, first_hit, hvalue, newItem->st);

	return newItem->st;
}

/*
 * __builtin_return_address() only accepts a compile-time constant, so
 * walk the caller chain with a direct switch instead of a recursive
 * stack trace.
 */
u64 __stack(int depth)
{
	u64 *func = NULL;

	switch (depth + ST_START) {
	case 3:
		func = __builtin_return_address(3);
		break;
	case 4:
		func = __builtin_return_address(4);
		break;
	case 5:
		func = __builtin_return_address(5);
		break;
	case 6:
		func = __builtin_return_address(6);
		break;
	case 7:
		func = __builtin_return_address(7);
		break;
	case 8:
		func = __builtin_return_address(8);
		break;
	case 9:
		func = __builtin_return_address(9);
		break;
	case 10:
		func = __builtin_return_address(10);
		break;
	case 11:
		func = __builtin_return_address(11);
		break;
	case 12:
		func = __builtin_return_address(12);
		break;
	case 13:
		func = __builtin_return_address(13);
		break;
	case 14:
		func = __builtin_return_address(14);
		break;
	case 15:
		func = __builtin_return_address(15);
		break;
	case 16:
		func = __builtin_return_address(16);
		break;
	case 17:
		func = __builtin_return_address(17);
		break;
	case 18:
		func = __builtin_return_address(18);
		break;
	case 19:
		func = __builtin_return_address(19);
		break;
	case 20:
		func = __builtin_return_address(20);
		break;
	case 21:
		func = __builtin_return_address(21);
		break;
	case 22:
		func = __builtin_return_address(22);
		break;
	case 23:
		func = __builtin_return_address(23);
		break;
	case 24:
		func = __builtin_return_address(24);
		break;
	case 25:
		func = __builtin_return_address(25);
		break;
	default:
		return 0;
	}

	return (u64)func;
}

#define NOT_TRACE	(0xDD)
#define FIN_TRACE	1
#define ACK_TRACE	2
#define GET_TRACE	3
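
/*
 * Classify one resolved symbol string. The offsets into 'pos' skip
 * common prefixes so each comparison lands on a distinctive part of
 * the expected symbol name (see the trailing comments). Returns
 * NOT_TRACE to abort the trace, FIN_TRACE at the end of the callstack,
 * ACK_TRACE/GET_TRACE for network-related frames, or 0 to keep walking.
 */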
int chk_stack(char *pos, int net_pkt)
{
	/* stop tracing */
	if (!strncmp(pos + 4, "f_nbu", 5))	// __qdf_nbuf_free
		return NOT_TRACE;
	if (!strncmp(pos, "unix", 4))		// unix_xxx
		return NOT_TRACE;
	if (!strncmp(pos + 2, "tlin", 4))	// netlink_xxx
		return NOT_TRACE;
	if (!strncmp(pos, "tpac", 4))		// tpacket_rcv
		return NOT_TRACE;
	if (!strncmp(pos, "drd", 3))		// drd_xxx
		return NOT_TRACE;
	if (!strncmp(pos + 1, "_sk_d", 5))	// __sk_destruct
		return NOT_TRACE;

#ifdef EXTENDED_DROPDUMP
	/* ignore normally consumed packets on TX path */
	if (!strncmp(pos + 2, "it_on", 5))	// xmit_one
		return NOT_TRACE;
	if (!strncmp(pos + 2, "t_tx_", 5))	// net_tx_action
		return NOT_TRACE;
	if (!strncmp(pos, "dp_tx", 5))		// dp_tx_comp_xxx
		return NOT_TRACE;

	/* prevent recursive call by __kfree_skb() */
	if (!strncmp(pos + 4, "ree_s", 5))	// __kfree_skb
		return NOT_TRACE;
#endif

	/* end of callstack */
	if (!strncmp(pos, "loc", 3))		// local_*
		return FIN_TRACE;
	if (!strncmp(pos + 7, "ftir", 4))	// __do_softirq
		return FIN_TRACE;
	if (!strncmp(pos + 7, "rk_r", 4))	// task_work_run
		return FIN_TRACE;
	if (!strncmp(pos, "SyS", 3))		// SyS_xxx
		return FIN_TRACE;
	if (!strncmp(pos, "ret_", 4))		// ret_from_xxx
		return FIN_TRACE;
	if (!strncmp(pos, "el", 2))		// el*
		return FIN_TRACE;
	if (!strncmp(pos, "gic", 3))		// gic_xxx
		return FIN_TRACE;
	if (!strncmp(pos + 3, "rt_ke", 5))	// start_kernel
		return FIN_TRACE;
	if (!strncmp(pos + 13, "rt_ke", 5))	// secondary_start_kernel
		return FIN_TRACE;

	/* network pkt */
	if (!net_pkt) {
		if (!strncmp(pos, "net", 3))
			return GET_TRACE;
		if (!strncmp(pos, "tcp", 3)) {
			/* a packet dropped by tcp_drop() can be normal
			 * operation; don't log pure ACKs.
			 */
			if (!strncmp(pos, "tcp_drop", 8))
				return ACK_TRACE;
			return GET_TRACE;
		}
		if (!strncmp(pos, "ip", 2))
			return GET_TRACE;
		if (!strncmp(pos, "icmp", 4))
			return GET_TRACE;
		if (!strncmp(pos, "xfr", 3))
			return GET_TRACE;
	}

	return 0;
}
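
/*
 * Detect a pure TCP ACK by hand: an IP packet whose payload is exactly
 * the TCP header (no data) and whose flag byte carries only ACK.
 */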
static bool _is_tcp_ack(struct sk_buff *skb)
{
	switch (skb->protocol) {
	/* TCPv4 ACKs */
	case htons(ETH_P_IP):
		if ((ip_hdr(skb)->protocol == IPPROTO_TCP) &&
		    (ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2) ==
		     tcp_hdr(skb)->doff << 2) &&
		    ((tcp_flag_word(tcp_hdr(skb)) &
		      cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK))
			return true;
		break;

	/* TCPv6 ACKs */
	case htons(ETH_P_IPV6):
		if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
		    (ntohs(ipv6_hdr(skb)->payload_len) ==
		     (tcp_hdr(skb)->doff) << 2) &&
		    ((tcp_flag_word(tcp_hdr(skb)) &
		      cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK))
			return true;
		break;
	}

	return false;
}

static inline bool is_tcp_ack(struct sk_buff *skb)
{
	if (skb_is_tcp_pure_ack(skb))
		return true;

	if (unlikely(_is_tcp_ack(skb)))
		return true;

	return false;
}
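
/*
 * Resolve one stack slot: replace the raw address stored at 'addr'
 * with its symbol string (cached in hmap), then classify the symbol
 * via chk_stack(). Takes hlock; get_hmap()/set_hmap() release it.
 */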
int symbol_lookup(u64 *addr, int net_pkt)
{
	char *symbol = NULL;

	spin_lock_bh(&hlock);
	symbol = get_hmap(addr);
	if (symbol != NULL)
		spin_unlock_bh(&hlock);
	else
		symbol = set_hmap(addr);

	/* set_hmap() returns NULL on allocation failure; abort the trace */
	if (symbol == NULL)
		return NOT_TRACE;

	memcpy((char *)addr, symbol, strlen(symbol));
	return chk_stack(symbol, net_pkt);
}
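
/*
 * Capture the drop callstack into the dummy skb at 'offset'. Raw
 * return addresses are stored one per ST_SIZE-byte slot; with DRD_WQ
 * the symbol resolution is deferred to set_stack_work(), otherwise it
 * is done inline here. Returns the captured depth, or NOT_TRACE when
 * the stack shows the packet should not be logged.
 */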
u8 get_stack(struct sk_buff *skb, struct sk_buff *dmy, unsigned int offset, unsigned int reason)
{
	u8 depth = 0, max_depth = ST_MAX;
	struct _dmy_info *dmy_info = (struct _dmy_info *)(dmy->data + offset);
	u64 *stack_base = &dmy_info->stack;
#ifdef DRD_WQ
	/* __builtin_return_address() sometimes returns an invalid address
	 * for the deep stacks of ksoftirqd, kworker, and migration tasks;
	 * limit the maximum depth for them.
	 */
	if ((current->comm[0] == 'k' && (current->comm[4] == 't' || current->comm[4] == 'k')) ||
	    (current->comm[0] == 'm' && current->comm[4] == 'a')) {
		dmy_info->flag |= LIMIT_DEPTH_BIT;
		max_depth >>= 1;
	}
#else
	int chk = 0, net_pkt = 0;
#endif

	if ((skb->tstamp >> 48) < 5000) {
		/* packet has a kernel timestamp, not UTC; store zero so
		 * tpacket_rcv() updates it to UTC.
		 */
		dmy_info->flag |= UPDATE_TIME_BIT;
		dmy->tstamp = 0;
	} else {
		/* keep the UTC timestamp of the original packet */
		dmy->tstamp = skb->tstamp;
	}

	drd_dbg(DEBUG_SAVE, "trace <%pK>\n", skb);
	for (depth = 0; depth < max_depth; depth++) {
		*stack_base = __stack(depth);
#ifdef DRD_WQ
		drd_dbg(DEBUG_SAVE, "%02d: <%pK>\n", depth, (u64 *)*stack_base);
		if (*stack_base == 0) {
			/* __builtin_return_address() reached the root of the stack */
			depth--;
			break;
		}
#else
		/* resolve and classify inline when set_stack_work() is not used */
		chk = symbol_lookup(stack_base, net_pkt);
		drd_dbg(DEBUG_SAVE, "[%2d:%d] <%s>\n", depth, chk, (char *)stack_base);
		if (chk == NOT_TRACE) {
			drd_dbg(DEBUG_TRACE, "not target stack\n");
			return NOT_TRACE;
		}
		if (chk == FIN_TRACE)
			break;
		if (chk == ACK_TRACE) {
			if (is_tcp_ack(skb)) {
				drd_dbg(DEBUG_TRACE, "don't trace tcp ack\n");
				return NOT_TRACE;
			} else {
				net_pkt = 1;
			}
		}
		if (chk == GET_TRACE)
			net_pkt = 1;
#endif
		stack_base += (ST_SIZE / sizeof(u64));
	}

	memcpy(dmy_info->magic, "DRD", 3);
	dmy_info->depth = depth;
	if (skip_count > 0) {
		dmy_info->skip_count = skip_count;
		skip_count = 0;
	}
	dmy_info->count = ++dropped_count;

	dmy_info->reason_id = reason;
	if (reason < DRD_REASON_MAX)
		memcpy(dmy_info->reason_str, drd_reasons[reason],
		       min(16, (int)strlen(drd_reasons[reason])));
	else
		memcpy(dmy_info->reason_str, "UNDEFINED_REASON", 16);

	drd_dbg(DEBUG_RAW, "<%pK:%pK> %*ph\n", dmy, dmy_info, 16, dmy_info);
	return depth;
}
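
/*
 * Deferred half of the DRD_WQ path: resolve the raw addresses that
 * get_stack() stored in the dummy skb and classify them. Returns the
 * walked depth, NOT_TRACE to drop the record, or a negative error.
 */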
int set_stack_work(struct sk_buff *skb, struct _dmy_info *dmy_info)
{
	int chk = 0, net_pkt = 0;
	u8 depth;
	u64 *stack_base;

	drd_dbg(DEBUG_RAW, "<%pK:%pK> %*ph\n", skb, dmy_info, 16, dmy_info);
	if (strncmp(dmy_info->magic, "DRD", 3)) {
		drd_dbg(DEBUG_TRACE, "invalid magic <%pK>\n", skb);
		return -1;
	}

	stack_base = &dmy_info->stack;
	for (depth = 0; depth < dmy_info->depth; depth++) {
		chk = symbol_lookup(stack_base, net_pkt);
		drd_dbg(DEBUG_RESTORE, "[%2d:%d] <%s>\n", depth, chk, (char *)stack_base);
		if (chk == NOT_TRACE) {
			drd_dbg(DEBUG_TRACE, "not target stack\n");
			return NOT_TRACE;
		}
		if (chk == FIN_TRACE)
			break;
		if (chk == ACK_TRACE) {
			if (is_tcp_ack(skb)) {
				drd_dbg(DEBUG_TRACE, "don't trace tcp ack\n");
				return NOT_TRACE;
			} else {
				net_pkt = 1;
			}
		}
		if (chk == GET_TRACE)
			net_pkt = 1;

		stack_base += (ST_SIZE / sizeof(u64));
	}

	if (net_pkt == 0) {
		drd_dbg(DEBUG_TRACE, "not defined packet\n");
		return -3;
	}

	return depth;
}

#ifdef DRD_WQ
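/*
 * Delayed-work consumer for queued drop records: resolve each skb's
 * callstack and feed it to the ETH_P_LOG packet handler, within a
 * budget that doubles (up to BUDGET_MAX) while the list stays busy.
 */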
static void save_pkt_work(struct work_struct *ws)
{
	struct sk_buff *skb, *next;
	struct packet_type *ptype = NULL;
	struct _dmy_info *dmy_info = NULL;
	int st_depth = 0;
	u16 budget = 0;

	list_for_each_entry_safe(skb, next, &drd_worker.list, list) {
		spin_lock_bh(&drd_worker.lock);
		if (support_dropdump) {
			list_for_each_entry_rcu(ptype, &ptype_log, list) {
				if (ptype != NULL)
					break;
			}
			drd_dbg(DEBUG_LOG, "del %u:%llu <%llx>\n", budget, drd_worker.num, (u64)(skb));
			skb_list_del_init(skb);
			drd_worker.num--;
		} else {
			spin_unlock_bh(&drd_worker.lock);
			return;
		}
		spin_unlock_bh(&drd_worker.lock);

		if (ptype == NULL || list_empty(&ptype->list)) {
			drd_dbg(DEBUG_LOG, "pt list not ready\n");
			__kfree_skb(skb);
			continue;
		}

		dmy_info = (struct _dmy_info *)(skb->data + PKTINFO_OFFSET(skb));
		st_depth = set_stack_work(skb, dmy_info);
		if (st_depth != NOT_TRACE)
			ptype->func(skb, skb->dev, ptype, skb->dev);
		else
			__kfree_skb(skb);

		if (++budget >= budget_limit)
			break;
	}

	if (!list_empty(&drd_worker.list)) {
		if (budget_limit < BUDGET_MAX)
			budget_limit <<= 1;
		queue_delayed_work(drd_worker.wq, &drd_worker.dwork, msecs_to_jiffies(1));
		drd_dbg(DEBUG_LOG, "pkt remained(%llu), trigger again. budget:%d\n",
			drd_worker.num, budget_limit);
	} else {
		drd_worker.num = 0;
	}
}
#else
void save_pkt(struct sk_buff *skb)
{
	struct packet_type *ptype = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_log, list) {
		if (ptype != NULL)
			break;
	}
	if (ptype == NULL || list_empty(&ptype->list)) {
		drd_dbg(DEBUG_LOG, "pt list not ready\n");
		__kfree_skb(skb);
		goto out;
	}

	drd_dbg(DEBUG_LOG, "%llu <%llx>\n", dropped_count, (u64)(skb));
	ptype->func(skb, skb->dev, ptype, skb->dev);
out:
	rcu_read_unlock();
}
#endif
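
/*
 * Sanity-check a dropped skb before cloning it: only IPv4/IPv6 packets
 * from interesting devices (selected by the first letter of the device
 * name) with a consistent header and buffer layout are captured.
 * Returns 0 when the packet should be dumped, negative otherwise.
 */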
int skb_validate(struct sk_buff *skb)
{
	if (virt_addr_valid(skb) && virt_addr_valid(skb->dev)) {
		struct iphdr *ip4hdr = (struct iphdr *)skb_network_header(skb);

		if (skb->protocol != htons(ETH_P_IPV6)
		    && skb->protocol != htons(ETH_P_IP))
			return -1;

		switch (skb->dev->name[0]) {
		case 'r': // rmnet*
		case 'v': // v4-rmnet*
		case 't': // tun
		case 'e': // epdg
			break;
		case 'l': // lo
		case 'b': // bt*
		case 'w': // wlan
		case 's': // swlan
			if (__ratelimit(&drd_ratelimit_pkt))
				break;
			if (skip_count < 0xffff)
				skip_count++;
			dropped_count++;
			return -9;
		default:
			drd_dbg(DEBUG_LOG, "invalid dev: %s\n", skb->dev->name);
			return -2;
		}

		if (unlikely((ip4hdr->version != 4 && ip4hdr->version != 6)
			     || ip4hdr->id == 0x6b6b))
			return -3;
		if (unlikely(!skb->len))
			return -4;
		if (unlikely(skb->len > skb->tail))
			return -5;
		if (unlikely(skb->data <= skb->head))
			return -6;
		if (unlikely(skb->tail > skb->end))
			return -7;
		if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
			return -8;

		drd_dbg(DEBUG_RAW, "ndev: %s\n", skb->dev->name);
		return 0;
	}

	return -255;
}
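
/*
 * Build the dummy skb that is handed to the logging packet handler:
 * copy up to PKTINFO_COPYLEN_MAX bytes of the original packet, then
 * reserve room for the _dmy_info header and the callstack buffer and
 * let get_stack() fill them in. Returns NULL when the packet turns
 * out not to be a trace target or allocation fails.
 */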
struct sk_buff *get_dummy(struct sk_buff *skb, unsigned int reason)//, char *pos, int st_depth)
{
	struct sk_buff *dummy = NULL;
	struct skb_shared_info *shinfo;
	unsigned int copy_len = PKTINFO_COPYLEN_MAX;
	unsigned int copy_buf_len = PKTINFO_COPYLEN_MAX;
	unsigned int org_len, dummy_len;
	u8 ret = 0;
	struct iphdr *ip4hdr = (struct iphdr *)(skb_network_header(skb));
	struct ipv6hdr *ip6hdr;

	if (ip4hdr->version == 4) {
		org_len = ntohs(ip4hdr->tot_len);
	} else {
		ip6hdr = (struct ipv6hdr *)ip4hdr;
		org_len = skb_network_header_len(skb) + ntohs(ip6hdr->payload_len);
	}

	if (org_len < PKTINFO_COPYLEN_MAX) {
		copy_len = org_len;
		copy_buf_len = round_up(org_len, 0x10);
	}

	dummy_len = copy_buf_len + sizeof(struct _dmy_info) + ST_BUF_SIZE;
	dummy = alloc_skb(dummy_len, GFP_ATOMIC);
	if (unlikely(!dummy)) {
		drd_dbg(DEBUG_LOG, "alloc fail, %u\n", dummy_len);
		return NULL;
	}

	drd_dbg(DEBUG_SAVE, "skb->len:%u org_len:%u copy_len:%u copy_buf_len:%u dummy_len:%u\n",
		skb->len, org_len, copy_len, copy_buf_len, dummy_len);

	dummy->dev = skb->dev;
	dummy->protocol = skb->protocol;
	dummy->ip_summed = CHECKSUM_UNNECESSARY;
	refcount_set(&skb->users, 1);

	skb_put(dummy, dummy_len);
	skb_reset_mac_header(dummy);
	skb_reset_network_header(dummy);
	skb_set_transport_header(dummy, skb_network_header_len(skb));

	shinfo = skb_shinfo(dummy);
	memset(shinfo, 0, sizeof(struct skb_shared_info));
	atomic_set(&shinfo->dataref, 1);

	INIT_LIST_HEAD(&dummy->list);

	memcpy(dummy->data, skb_network_header(skb), copy_len);
	memset((void *)((u64)dummy->data + (u64)copy_len), 0,
	       0x10 - (copy_len % 0x10) + sizeof(struct _dmy_info) + ST_BUF_SIZE);

	ret = get_stack(skb, dummy, copy_buf_len, reason);
	if (ret != NOT_TRACE) {
		PKTINFO_OFFSET(dummy) = copy_buf_len;
	} else {
		drd_dbg(DEBUG_SAVE, "not saving pkt\n");
		__kfree_skb(dummy);
		return NULL;
	}

	return dummy;
}
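
/*
 * Vendor-hook entry point, called for every kfree_skb(). When dumping
 * is off (support_dropdump < 2) any queued records are purged; when
 * on, a validated drop is cloned into a dummy skb and either queued
 * for the worker (DRD_WQ) or delivered synchronously via save_pkt().
 */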
void drd_kfree_skb(struct sk_buff *skb, unsigned int reason)
{
	struct sk_buff *dmy;
#ifdef DRD_WQ
	struct sk_buff *next;
#endif

	if (support_dropdump < 2) {
#ifdef DRD_WQ
		if (drd_worker.num) {
			drd_dbg(DEBUG_LOG, "purge drd list\n");
			cancel_delayed_work(&drd_worker.dwork);
			spin_lock_bh(&drd_worker.lock);
			list_for_each_entry_safe(dmy, next, &drd_worker.list, list) {
				skb_list_del_init(dmy);
				__kfree_skb(dmy);
			}
			drd_worker.num = 0;
			spin_unlock_bh(&drd_worker.lock);
		}
#endif
		return;
	}

	if (skb_validate(skb))
		return;

#ifdef DRD_WQ
	if (unlikely(drd_worker.num >= LIST_MAX - 1)) {
		drd_dbg(DEBUG_LOG, "drd list full\n");
		return;
	}
#endif

	dmy = get_dummy(skb, reason);
	if (dmy == NULL)
		return;

#ifdef DRD_WQ
	spin_lock_bh(&drd_worker.lock);
	if (support_dropdump) {
		list_add_tail(&dmy->list, &drd_worker.list);
		drd_worker.num++;
		drd_dbg(DEBUG_LOG, "add %llu <%pK>\n", drd_worker.num, dmy);
	}
	spin_unlock_bh(&drd_worker.lock);

	budget_limit = budget_default;
	queue_delayed_work(drd_worker.wq, &drd_worker.dwork, 0);
#else
	save_pkt(dmy);
#endif
}
EXPORT_SYMBOL_GPL(drd_kfree_skb);

void drd_ptype_head(const struct packet_type *pt, struct list_head *vendor_pt)
{
	if (pt->type == htons(ETH_P_LOG))
		vendor_pt->next = &ptype_log;
}
EXPORT_SYMBOL_GPL(drd_ptype_head);

#if defined(CONFIG_ANDROID_VENDOR_HOOKS)
static void drd_ptype_head_handler(void *data, const struct packet_type *pt, struct list_head *vendor_pt)
{
	drd_ptype_head(pt, vendor_pt);
}
#else
/* A macro can't point directly at the drd_xxx functions instead of a
 * wrapper, because EXPORT_SYMBOL has to be applied for the module
 * build; it must be used here, next to the definition.
 */
void trace_android_vh_ptype_head(const struct packet_type *pt, struct list_head *vendor_pt)
{
	drd_ptype_head(pt, vendor_pt);
}
EXPORT_SYMBOL_GPL(trace_android_vh_ptype_head);
#endif

#if defined(TRACE_SKB_DROP_REASON) || defined(DEFINE_DROP_REASON)
static void drd_kfree_skb_handler(void *data, struct sk_buff *skb,
				  void *location, enum skb_drop_reason reason)
{
#else
static void drd_kfree_skb_handler(void *data, struct sk_buff *skb, void *location)
{
	unsigned int reason = 0;
#endif
	drd_kfree_skb(skb, (unsigned int)reason);
}

struct kobject *drd_kobj;

int get_attr_input(const char *buf, int *val)
{
	int ival;
	int err = kstrtoint(buf, 0, &ival);

	if (err >= 0)
		*val = ival;
	else
		drd_info("invalid input: %s\n", buf);

	return err;
}

static ssize_t hstat_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf,
		"stack : total %d, used %llu, duplicated %llu, max_depth %u, dropped %llu\n",
		DRD_HSIZE, hmap_count, hdup_count, hmax_depth, dropped_count);
}

static struct kobj_attribute hstat_attribute = {
	.attr = {.name = "hstat", .mode = 0660},
	.show = hstat_show,
};

static ssize_t hmap_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	int i;
	struct st_item *lookup;

	for (i = 0; i < DRD_HSIZE; i++) {
		lookup = &hmap[i];
		drd_info("---------------------------------------------------------------------\n");
		do {
			drd_info("%03d <%llx:%llu> '%s'\n", i, lookup->p, lookup->matched, lookup->st);
			lookup = (struct st_item *)lookup->list.next;
		} while (lookup != &hmap[i]);
	}
	drd_info("---------------------------------------------------------------------\n");

	return sprintf(buf, "hmap checked\n");
}

static struct kobj_attribute hmap_attribute = {
	.attr = {.name = "hmap", .mode = 0660},
	.show = hmap_show,
};

static ssize_t debug_drd_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "current debug_drd: %d (0x%x)\n", debug_drd, debug_drd);
}

ssize_t debug_drd_store(struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t count)
{
	if (get_attr_input(buf, &debug_drd) >= 0)
		drd_info("debug_drd = %d\n", debug_drd);

	return count;
}

static struct kobj_attribute debug_drd_attribute = {
	.attr = {.name = "debug_drd", .mode = 0660},
	.show = debug_drd_show,
	.store = debug_drd_store,
};

#ifdef DRD_WQ
static ssize_t budget_default_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "current budget_default: %u\n", budget_default);
}

ssize_t budget_default_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	/* budget_default is unsigned, but has the same width as int here */
	if (get_attr_input(buf, (int *)&budget_default) >= 0)
		drd_info("budget_default = %u\n", budget_default);

	return count;
}

static struct kobj_attribute budget_default_attribute = {
	.attr = {.name = "budget_default", .mode = 0660},
	.show = budget_default_show,
	.store = budget_default_store,
};
#endif

static struct attribute *dropdump_attrs[] = {
	&hstat_attribute.attr,
	&hmap_attribute.attr,
	&debug_drd_attribute.attr,
#ifdef DRD_WQ
	&budget_default_attribute.attr,
#endif
	NULL,
};
ATTRIBUTE_GROUPS(dropdump);
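
/*
 * Userspace control: the sysctl below appears as
 * /proc/sys/net/core/support_dropdump. drd_kfree_skb() only captures
 * packets while the value is >= 2; lower values purge and disable.
 */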
static struct ctl_table drd_proc_table[] = {
	{
		.procname	= "support_dropdump",
		.data		= &support_dropdump,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#ifdef EXTENDED_DROPDUMP
	{
		.procname	= "support_dropdump_ext",
		.data		= &support_dropdump,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ }
};

static int __init init_net_drop_dump(void)
{
	int rc = 0, i;

	drd_info("\n");

	INIT_LIST_HEAD(&ptype_log);

	init_net.core.sysctl_hdr = register_net_sysctl(&init_net, "net/core", drd_proc_table);
	if (init_net.core.sysctl_hdr == NULL) {
		drd_info("init sysctl failed\n");
		return -ENODEV;
	}

#if defined(CONFIG_ANDROID_VENDOR_HOOKS)
	rc = register_trace_android_vh_ptype_head(drd_ptype_head_handler, NULL);
#endif
	rc += register_trace_kfree_skb(drd_kfree_skb_handler, NULL);
	if (rc) {
		drd_info("fail to register android trace\n");
		return -EIO;
	}

#ifdef DRD_WQ
	drd_worker.wq = create_workqueue("drd_work");
	if (!drd_worker.wq) {
		drd_info("fail to create wq\n");
		return -ENOMEM;
	}
	INIT_DELAYED_WORK(&drd_worker.dwork, save_pkt_work);
	INIT_LIST_HEAD(&drd_worker.list);
	spin_lock_init(&drd_worker.lock);
	drd_worker.num = 0;
#endif

	drd_kobj = kobject_create_and_add("dropdump", kernel_kobj);
	if (!drd_kobj) {
		drd_info("fail to create kobj\n");
		rc = -ENOMEM;
		goto kobj_error;
	}

	rc = sysfs_create_groups(drd_kobj, dropdump_groups);
	if (rc) {
		drd_info("fail to create attr\n");
		goto attr_error;
	}

	for (i = 0; i < DRD_HSIZE; i++)
		init_st_item(&hmap[i]);
	spin_lock_init(&hlock);

	support_dropdump = 0;

	goto out;

attr_error:
	kobject_put(drd_kobj);
kobj_error:
#ifdef DRD_WQ
	destroy_workqueue(drd_worker.wq);
#endif
out:
	return rc;
}

static void exit_net_drop_dump(void)
{
	drd_info("\n");
	support_dropdump = 0;
}

module_init(init_net_drop_dump);
module_exit(exit_net_drop_dump);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Samsung dropdump module");