/* net/x25/x25_forward.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * History
  4. * 03-01-2007 Added forwarding for x.25 Andrew Hendry
  5. */
  6. #define pr_fmt(fmt) "X25: " fmt
  7. #include <linux/if_arp.h>
  8. #include <linux/init.h>
  9. #include <linux/slab.h>
  10. #include <net/x25.h>
/* Table of active forwarded calls, one entry per (lci, dev1, dev2) pair.
 * Guarded by x25_forward_list_lock: readers take read_lock_bh(), any
 * insertion/removal takes write_lock_bh().
 */
LIST_HEAD(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);
  13. int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
  14. struct sk_buff *skb, int lci)
  15. {
  16. struct x25_route *rt;
  17. struct x25_neigh *neigh_new = NULL;
  18. struct x25_forward *x25_frwd, *new_frwd;
  19. struct sk_buff *skbn;
  20. short same_lci = 0;
  21. int rc = 0;
  22. if ((rt = x25_get_route(dest_addr)) == NULL)
  23. goto out_no_route;
  24. if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
  25. /* This shouldn't happen, if it occurs somehow
  26. * do something sensible
  27. */
  28. goto out_put_route;
  29. }
  30. /* Avoid a loop. This is the normal exit path for a
  31. * system with only one x.25 iface and default route
  32. */
  33. if (rt->dev == from->dev) {
  34. goto out_put_nb;
  35. }
  36. /* Remote end sending a call request on an already
  37. * established LCI? It shouldn't happen, just in case..
  38. */
  39. read_lock_bh(&x25_forward_list_lock);
  40. list_for_each_entry(x25_frwd, &x25_forward_list, node) {
  41. if (x25_frwd->lci == lci) {
  42. pr_warn("call request for lci which is already registered!, transmitting but not registering new pair\n");
  43. same_lci = 1;
  44. }
  45. }
  46. read_unlock_bh(&x25_forward_list_lock);
  47. /* Save the forwarding details for future traffic */
  48. if (!same_lci){
  49. if ((new_frwd = kmalloc(sizeof(struct x25_forward),
  50. GFP_ATOMIC)) == NULL){
  51. rc = -ENOMEM;
  52. goto out_put_nb;
  53. }
  54. new_frwd->lci = lci;
  55. new_frwd->dev1 = rt->dev;
  56. new_frwd->dev2 = from->dev;
  57. write_lock_bh(&x25_forward_list_lock);
  58. list_add(&new_frwd->node, &x25_forward_list);
  59. write_unlock_bh(&x25_forward_list_lock);
  60. }
  61. /* Forward the call request */
  62. if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
  63. goto out_put_nb;
  64. }
  65. x25_transmit_link(skbn, neigh_new);
  66. rc = 1;
  67. out_put_nb:
  68. x25_neigh_put(neigh_new);
  69. out_put_route:
  70. x25_route_put(rt);
  71. out_no_route:
  72. return rc;
  73. }
  74. int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
  75. struct x25_forward *frwd;
  76. struct net_device *peer = NULL;
  77. struct x25_neigh *nb;
  78. struct sk_buff *skbn;
  79. int rc = 0;
  80. read_lock_bh(&x25_forward_list_lock);
  81. list_for_each_entry(frwd, &x25_forward_list, node) {
  82. if (frwd->lci == lci) {
  83. /* The call is established, either side can send */
  84. if (from->dev == frwd->dev1) {
  85. peer = frwd->dev2;
  86. } else {
  87. peer = frwd->dev1;
  88. }
  89. break;
  90. }
  91. }
  92. read_unlock_bh(&x25_forward_list_lock);
  93. if ( (nb = x25_get_neigh(peer)) == NULL)
  94. goto out;
  95. if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
  96. goto output;
  97. }
  98. x25_transmit_link(skbn, nb);
  99. rc = 1;
  100. output:
  101. x25_neigh_put(nb);
  102. out:
  103. return rc;
  104. }
  105. void x25_clear_forward_by_lci(unsigned int lci)
  106. {
  107. struct x25_forward *fwd, *tmp;
  108. write_lock_bh(&x25_forward_list_lock);
  109. list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
  110. if (fwd->lci == lci) {
  111. list_del(&fwd->node);
  112. kfree(fwd);
  113. }
  114. }
  115. write_unlock_bh(&x25_forward_list_lock);
  116. }
  117. void x25_clear_forward_by_dev(struct net_device *dev)
  118. {
  119. struct x25_forward *fwd, *tmp;
  120. write_lock_bh(&x25_forward_list_lock);
  121. list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
  122. if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
  123. list_del(&fwd->node);
  124. kfree(fwd);
  125. }
  126. }
  127. write_unlock_bh(&x25_forward_list_lock);
  128. }