// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic HDLC support routines for Linux
 * Point-to-point protocol support
 *
 * Copyright (C) 1999 - 2008 Krzysztof Halasa <[email protected]>
 */

#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define DEBUG_CP		0	/* also bytes# to dump */
#define DEBUG_STATE		0
#define DEBUG_HARD_HEADER	0

#define HDLC_ADDR_ALLSTATIONS	0xFF
#define HDLC_CTRL_UI		0x03

#define PID_LCP			0xC021
#define PID_IP			0x0021
#define PID_IPCP		0x8021
#define PID_IPV6		0x0057
#define PID_IPV6CP		0x8057

enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT};

enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ,
      CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY,
      LCP_DISC_REQ, CP_CODES};

#if DEBUG_CP
static const char *const code_names[CP_CODES] = {
	"0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq",
	"TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard"
};
static char debug_buffer[64 + 3 * DEBUG_CP];
#endif

enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5};

struct hdlc_header {
	u8 address;
	u8 control;
	__be16 protocol;
};

struct cp_header {
	u8 code;
	u8 id;
	__be16 len;
};
  52. struct proto {
  53. struct net_device *dev;
  54. struct timer_list timer;
  55. unsigned long timeout;
  56. u16 pid; /* protocol ID */
  57. u8 state;
  58. u8 cr_id; /* ID of last Configuration-Request */
  59. u8 restart_counter;
  60. };
  61. struct ppp {
  62. struct proto protos[IDX_COUNT];
  63. spinlock_t lock;
  64. unsigned long last_pong;
  65. unsigned int req_timeout, cr_retries, term_retries;
  66. unsigned int keepalive_interval, keepalive_timeout;
  67. u8 seq; /* local sequence number for requests */
  68. u8 echo_id; /* ID of last Echo-Request (LCP) */
  69. };
  70. enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED,
  71. STATES, STATE_MASK = 0xF};
  72. enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA,
  73. RUC, RXJ_GOOD, RXJ_BAD, EVENTS};
  74. enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100,
  75. SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000};
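
/* Each cp_table[event][state] entry below packs the next state into the low
 * nibble (STATE_MASK) and ORs in zero or more of the action flags above;
 * INV marks event/state combinations that are not expected to occur.
 */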

#if DEBUG_STATE
static const char *const state_names[STATES] = {
	"Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent",
	"Opened"
};
static const char *const event_names[EVENTS] = {
	"Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN",
	"RTR", "RTA", "RUC", "RXJ+", "RXJ-"
};
#endif

static struct sk_buff_head tx_queue; /* used when holding the spin lock */

static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs);

static inline struct ppp *get_ppp(struct net_device *dev)
{
	return (struct ppp *)dev_to_hdlc(dev)->state;
}

static inline struct proto *get_proto(struct net_device *dev, u16 pid)
{
	struct ppp *ppp = get_ppp(dev);

	switch (pid) {
	case PID_LCP:
		return &ppp->protos[IDX_LCP];
	case PID_IPCP:
		return &ppp->protos[IDX_IPCP];
	case PID_IPV6CP:
		return &ppp->protos[IDX_IPV6CP];
	default:
		return NULL;
	}
}

static inline const char *proto_name(u16 pid)
{
	switch (pid) {
	case PID_LCP:
		return "LCP";
	case PID_IPCP:
		return "IPCP";
	case PID_IPV6CP:
		return "IPV6CP";
	default:
		return NULL;
	}
}
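
/* Receive-path classification: IPv4/IPv6 frames have the HDLC header
 * stripped and are passed up the stack as ETH_P_IP/ETH_P_IPV6; anything
 * else is left as ETH_P_HDLC so the generic HDLC layer delivers it to
 * ppp_rx() below.
 */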
static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct hdlc_header *data = (struct hdlc_header *)skb->data;

	if (skb->len < sizeof(struct hdlc_header))
		return htons(ETH_P_HDLC);

	if (data->address != HDLC_ADDR_ALLSTATIONS ||
	    data->control != HDLC_CTRL_UI)
		return htons(ETH_P_HDLC);

	switch (data->protocol) {
	case cpu_to_be16(PID_IP):
		skb_pull(skb, sizeof(struct hdlc_header));
		return htons(ETH_P_IP);

	case cpu_to_be16(PID_IPV6):
		skb_pull(skb, sizeof(struct hdlc_header));
		return htons(ETH_P_IPV6);

	default:
		return htons(ETH_P_HDLC);
	}
}
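
/* Prepend the 4-byte PPP-in-HDLC header: all-stations address (0xFF),
 * UI control field (0x03) and a 16-bit protocol ID, so e.g. an IPv4
 * frame starts with FF 03 00 21 on the wire.
 */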
static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
			   u16 type, const void *daddr, const void *saddr,
			   unsigned int len)
{
	struct hdlc_header *data;
#if DEBUG_HARD_HEADER
	printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name);
#endif

	skb_push(skb, sizeof(struct hdlc_header));
	data = (struct hdlc_header *)skb->data;

	data->address = HDLC_ADDR_ALLSTATIONS;
	data->control = HDLC_CTRL_UI;
	switch (type) {
	case ETH_P_IP:
		data->protocol = htons(PID_IP);
		break;
	case ETH_P_IPV6:
		data->protocol = htons(PID_IPV6);
		break;
	case PID_LCP:
	case PID_IPCP:
	case PID_IPV6CP:
		data->protocol = htons(type);
		break;
	default:	/* unknown protocol */
		data->protocol = 0;
	}
	return sizeof(struct hdlc_header);
}
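
/* Control frames are built while ppp->lock is held, so ppp_tx_cp() only
 * queues them on tx_queue; callers run ppp_tx_flush() after dropping the
 * lock to hand the queued skbs to dev_queue_xmit().
 */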
static void ppp_tx_flush(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&tx_queue)) != NULL)
		dev_queue_xmit(skb);
}

static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
		      u8 id, unsigned int len, const void *data)
{
	struct sk_buff *skb;
	struct cp_header *cp;
	unsigned int magic_len = 0;
	static u32 magic;
#if DEBUG_CP
	int i;
	char *ptr;
#endif

	if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY))
		magic_len = sizeof(magic);

	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
			    sizeof(struct cp_header) + magic_len + len);
	if (!skb)
		return;
	skb_reserve(skb, sizeof(struct hdlc_header));

	cp = skb_put(skb, sizeof(struct cp_header));
	cp->code = code;
	cp->id = id;
	cp->len = htons(sizeof(struct cp_header) + magic_len + len);

	if (magic_len)
		skb_put_data(skb, &magic, magic_len);
	if (len)
		skb_put_data(skb, data, len);

#if DEBUG_CP
	BUG_ON(code >= CP_CODES);
	ptr = debug_buffer;
	*ptr = '\x0';
	for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) {
		sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
		ptr += strlen(ptr);
	}
	printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name,
	       proto_name(pid), code_names[code], id, debug_buffer);
#endif

	ppp_hard_header(skb, dev, pid, NULL, NULL, 0);

	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);
	skb_queue_tail(&tx_queue, skb);
}

/* State transition table (compare STD-51)
     Events                                   Actions
  TO+  = Timeout with counter > 0             irc = Initialize-Restart-Count
  TO-  = Timeout with counter expired         zrc = Zero-Restart-Count
  RCR+ = Receive-Configure-Request (Good)     scr = Send-Configure-Request
  RCR- = Receive-Configure-Request (Bad)
  RCA  = Receive-Configure-Ack                sca = Send-Configure-Ack
  RCN  = Receive-Configure-Nak/Rej            scn = Send-Configure-Nak/Rej
  RTR  = Receive-Terminate-Request            str = Send-Terminate-Request
  RTA  = Receive-Terminate-Ack                sta = Send-Terminate-Ack
  RUC  = Receive-Unknown-Code                 scj = Send-Code-Reject
  RXJ+ = Receive-Code-Reject (permitted)
         or Receive-Protocol-Reject
  RXJ- = Receive-Code-Reject (catastrophic)
         or Receive-Protocol-Reject
*/
static int cp_table[EVENTS][STATES] = {
	/*  CLOSED       STOPPED     STOPPING  REQ_SENT   ACK_RECV  ACK_SENT    OPENED
	       0            1            2         3          4         5           6    */
	{ IRC|SCR|3,      INV      ,   INV   ,    INV    ,   INV  ,    INV    ,    INV   }, /* START */
	{    INV   ,       0       ,    0    ,     0     ,    0   ,     0     ,     0    }, /* STOP */
	{    INV   ,      INV      ,  STR|2  ,   SCR|3   ,  SCR|3 ,   SCR|5   ,    INV   }, /* TO+ */
	{    INV   ,      INV      ,    1    ,     1     ,    1   ,     1     ,    INV   }, /* TO- */
	{   STA|0  , IRC|SCR|SCA|5 ,    2    ,   SCA|5   ,  SCA|6 ,   SCA|5   , SCR|SCA|5}, /* RCR+ */
	{   STA|0  , IRC|SCR|SCN|3 ,    2    ,   SCN|3   ,  SCN|4 ,   SCN|3   , SCR|SCN|3}, /* RCR- */
	{   STA|0  ,     STA|1     ,    2    ,   IRC|4   ,  SCR|3 ,     6     ,   SCR|3  }, /* RCA */
	{   STA|0  ,     STA|1     ,    2    , IRC|SCR|3 ,  SCR|3 , IRC|SCR|5 ,   SCR|3  }, /* RCN */
	{   STA|0  ,     STA|1     ,  STA|2  ,   STA|3   ,  STA|3 ,   STA|3   , ZRC|STA|2}, /* RTR */
	{     0    ,       1       ,    1    ,     3     ,    3   ,     5     ,   SCR|3  }, /* RTA */
	{   SCJ|0  ,     SCJ|1     ,  SCJ|2  ,   SCJ|3   ,  SCJ|4 ,   SCJ|5   ,   SCJ|6  }, /* RUC */
	{     0    ,       1       ,    2    ,     3     ,    3   ,     5     ,     6    }, /* RXJ+ */
	{     0    ,       1       ,    1    ,     1     ,    1   ,     1     , IRC|STR|2}, /* RXJ- */
};
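
/* Example reading of the table: cp_table[RCR_GOOD][REQ_SENT] is SCA|5,
 * i.e. a good Configure-Request received in REQ_SENT triggers a
 * Configure-Ack and a transition to ACK_SENT.
 */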

/* SCA: RCR+ must supply id, len and data
   SCN: RCR- must supply code, id, len and data
   STA: RTR must supply id
   SCJ: RUC must supply CP packet len and data */
static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
			 u8 id, unsigned int len, const void *data)
{
	int old_state, action;
	struct ppp *ppp = get_ppp(dev);
	struct proto *proto = get_proto(dev, pid);

	old_state = proto->state;
	BUG_ON(old_state >= STATES);
	BUG_ON(event >= EVENTS);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif

	action = cp_table[event][old_state];

	proto->state = action & STATE_MASK;
	if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
		mod_timer(&proto->timer, proto->timeout =
			  jiffies + ppp->req_timeout * HZ);
	if (action & ZRC)
		proto->restart_counter = 0;
	if (action & IRC)
		proto->restart_counter = (proto->state == STOPPING) ?
			ppp->term_retries : ppp->cr_retries;

	if (action & SCR)	/* send Configure-Request */
		ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
			  0, NULL);
	if (action & SCA)	/* send Configure-Ack */
		ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
	if (action & SCN)	/* send Configure-Nak/Reject */
		ppp_tx_cp(dev, pid, code, id, len, data);
	if (action & STR)	/* send Terminate-Request */
		ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
	if (action & STA)	/* send Terminate-Ack */
		ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
	if (action & SCJ)	/* send Code-Reject */
		ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);

	if (old_state != OPENED && proto->state == OPENED) {
		netdev_info(dev, "%s up\n", proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_off(dev);
			ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
			ppp->last_pong = jiffies;
			mod_timer(&proto->timer, proto->timeout =
				  jiffies + ppp->keepalive_interval * HZ);
		}
	}
	if (old_state == OPENED && proto->state != OPENED) {
		netdev_info(dev, "%s down\n", proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_on(dev);
			ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
		}
	}

	if (old_state != CLOSED && proto->state == CLOSED)
		del_timer(&proto->timer);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif
}
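
/* Parse a received Configure-Request: LCP MRU and a valid magic number are
 * accepted, a non-default ACCM is countered with a Configure-Nak, and any
 * other option (including all IPCP/IPV6CP options) is collected for a
 * Configure-Reject. A non-empty reject list takes precedence over the nak
 * list.
 */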
static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
			    unsigned int req_len, const u8 *data)
{
	static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 };
	const u8 *opt;
	u8 *out;
	unsigned int len = req_len, nak_len = 0, rej_len = 0;

	out = kmalloc(len, GFP_ATOMIC);
	if (!out) {
		dev->stats.rx_dropped++;
		return;	/* out of memory, ignore CR packet */
	}

	for (opt = data; len; len -= opt[1], opt += opt[1]) {
		if (len < 2 || opt[1] < 2 || len < opt[1])
			goto err_out;

		if (pid == PID_LCP)
			switch (opt[0]) {
			case LCP_OPTION_MRU:
				continue; /* MRU always OK and > 1500 bytes? */

			case LCP_OPTION_ACCM: /* async control character map */
				if (opt[1] < sizeof(valid_accm))
					goto err_out;
				if (!memcmp(opt, valid_accm,
					    sizeof(valid_accm)))
					continue;
				if (!rej_len) { /* NAK it */
					memcpy(out + nak_len, valid_accm,
					       sizeof(valid_accm));
					nak_len += sizeof(valid_accm);
					continue;
				}
				break;
			case LCP_OPTION_MAGIC:
				if (len < 6)
					goto err_out;
				if (opt[1] != 6 || (!opt[2] && !opt[3] &&
						    !opt[4] && !opt[5]))
					break; /* reject invalid magic number */
				continue;
			}
		/* reject this option */
		memcpy(out + rej_len, opt, opt[1]);
		rej_len += opt[1];
	}

	if (rej_len)
		ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out);
	else if (nak_len)
		ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out);
	else
		ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);

	kfree(out);
	return;

err_out:
	dev->stats.rx_errors++;
	kfree(out);
}
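
/* Handle control frames left as ETH_P_HDLC by ppp_type_trans(): validate
 * the HDLC and CP headers, then feed the received code into the state
 * machine; protocol IDs other than LCP/IPCP/IPV6CP get an LCP
 * Protocol-Reject once LCP is up.
 */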
static int ppp_rx(struct sk_buff *skb)
{
	struct hdlc_header *hdr = (struct hdlc_header *)skb->data;
	struct net_device *dev = skb->dev;
	struct ppp *ppp = get_ppp(dev);
	struct proto *proto;
	struct cp_header *cp;
	unsigned long flags;
	unsigned int len;
	u16 pid;
#if DEBUG_CP
	int i;
	char *ptr;
#endif

	spin_lock_irqsave(&ppp->lock, flags);
	/* Check HDLC header */
	if (skb->len < sizeof(struct hdlc_header))
		goto rx_error;
	cp = skb_pull(skb, sizeof(struct hdlc_header));
	if (hdr->address != HDLC_ADDR_ALLSTATIONS ||
	    hdr->control != HDLC_CTRL_UI)
		goto rx_error;

	pid = ntohs(hdr->protocol);
	proto = get_proto(dev, pid);
	if (!proto) {
		if (ppp->protos[IDX_LCP].state == OPENED)
			ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ,
				  ++ppp->seq, skb->len + 2, &hdr->protocol);
		goto rx_error;
	}

	len = ntohs(cp->len);
	if (len < sizeof(struct cp_header) /* no complete CP header? */ ||
	    skb->len < len /* truncated packet? */)
		goto rx_error;
	skb_pull(skb, sizeof(struct cp_header));
	len -= sizeof(struct cp_header);

	/* HDLC and CP headers stripped from skb */
#if DEBUG_CP
	if (cp->code < CP_CODES)
		sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code],
			cp->id);
	else
		sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id);
	ptr = debug_buffer + strlen(debug_buffer);
	for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) {
		sprintf(ptr, " %02X", skb->data[i]);
		ptr += strlen(ptr);
	}
	printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid),
	       debug_buffer);
#endif

	/* LCP only */
	if (pid == PID_LCP)
		switch (cp->code) {
		case LCP_PROTO_REJ:
			pid = ntohs(*(__be16 *)skb->data);
			if (pid == PID_LCP || pid == PID_IPCP ||
			    pid == PID_IPV6CP)
				ppp_cp_event(dev, pid, RXJ_BAD, 0, 0,
					     0, NULL);
			goto out;

		case LCP_ECHO_REQ: /* send Echo-Reply */
			if (len >= 4 && proto->state == OPENED)
				ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY,
					  cp->id, len - 4, skb->data + 4);
			goto out;

		case LCP_ECHO_REPLY:
			if (cp->id == ppp->echo_id)
				ppp->last_pong = jiffies;
			goto out;

		case LCP_DISC_REQ: /* discard */
			goto out;
		}

	/* LCP, IPCP and IPV6CP */
	switch (cp->code) {
	case CP_CONF_REQ:
		ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
		break;

	case CP_CONF_ACK:
		if (cp->id == proto->cr_id)
			ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL);
		break;

	case CP_CONF_REJ:
	case CP_CONF_NAK:
		if (cp->id == proto->cr_id)
			ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL);
		break;

	case CP_TERM_REQ:
		ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL);
		break;

	case CP_TERM_ACK:
		ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL);
		break;

	case CP_CODE_REJ:
		ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL);
		break;

	default:
		len += sizeof(struct cp_header);
		if (len > dev->mtu)
			len = dev->mtu;
		ppp_cp_event(dev, pid, RUC, 0, 0, len, cp);
		break;
	}
	goto out;

rx_error:
	dev->stats.rx_errors++;
out:
	spin_unlock_irqrestore(&ppp->lock, flags);
	dev_kfree_skb_any(skb);
	ppp_tx_flush();
	return NET_RX_DROP;
}
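
/* Per-protocol timer: in the negotiating and terminating states it drives
 * retransmission through the restart counter (TO+/TO- events); once LCP is
 * OPENED it sends periodic Echo-Requests and restarts the link when no
 * Echo-Reply has arrived within keepalive_timeout seconds.
 */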
static void ppp_timer(struct timer_list *t)
{
	struct proto *proto = from_timer(proto, t, timer);
	struct ppp *ppp = get_ppp(proto->dev);
	unsigned long flags;

	spin_lock_irqsave(&ppp->lock, flags);
	/* mod_timer could be called after we entered this function but
	 * before we got the lock.
	 */
	if (timer_pending(&proto->timer)) {
		spin_unlock_irqrestore(&ppp->lock, flags);
		return;
	}
	switch (proto->state) {
	case STOPPING:
	case REQ_SENT:
	case ACK_RECV:
	case ACK_SENT:
		if (proto->restart_counter) {
			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
				     0, NULL);
			proto->restart_counter--;
		} else if (netif_carrier_ok(proto->dev))
			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
				     0, NULL);
		else
			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
				     0, NULL);
		break;

	case OPENED:
		if (proto->pid != PID_LCP)
			break;
		if (time_after(jiffies, ppp->last_pong +
			       ppp->keepalive_timeout * HZ)) {
			netdev_info(proto->dev, "Link down\n");
			ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL);
			ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL);
		} else {	/* send keep-alive packet */
			ppp->echo_id = ++ppp->seq;
			ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ,
				  ppp->echo_id, 0, NULL);
			proto->timer.expires = jiffies +
				ppp->keepalive_interval * HZ;
			add_timer(&proto->timer);
		}
		break;
	}
	spin_unlock_irqrestore(&ppp->lock, flags);
	ppp_tx_flush();
}
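
/* .start hook for the generic HDLC core: reset all three per-protocol
 * state machines to CLOSED and kick LCP with a START event.
 */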
static void ppp_start(struct net_device *dev)
{
	struct ppp *ppp = get_ppp(dev);
	int i;

	for (i = 0; i < IDX_COUNT; i++) {
		struct proto *proto = &ppp->protos[i];

		proto->dev = dev;
		timer_setup(&proto->timer, ppp_timer, 0);
		proto->state = CLOSED;
	}
	ppp->protos[IDX_LCP].pid = PID_LCP;
	ppp->protos[IDX_IPCP].pid = PID_IPCP;
	ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP;

	ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL);
}

static void ppp_stop(struct net_device *dev)
{
	ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
}

static void ppp_close(struct net_device *dev)
{
	ppp_tx_flush();
}

static struct hdlc_proto proto = {
	.start		= ppp_start,
	.stop		= ppp_stop,
	.close		= ppp_close,
	.type_trans	= ppp_type_trans,
	.ioctl		= ppp_ioctl,
	.netif_rx	= ppp_rx,
	.module		= THIS_MODULE,
};

static const struct header_ops ppp_header_ops = {
	.create = ppp_hard_header,
};
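
/* Attach/query via the HDLC ioctl interface. IF_PROTO_PPP binds this
 * protocol to the device and sets fixed defaults: 2 s request timeout,
 * 10 configure retries, 2 terminate retries, 10 s keepalive interval and
 * 60 s keepalive timeout; there are no user-settable parameters.
 */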
static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ppp *ppp;
	int result;

	switch (ifs->type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;
		ifs->type = IF_PROTO_PPP;
		return 0; /* return protocol only, no settable parameters */

	case IF_PROTO_PPP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		/* no settable parameters */

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp));
		if (result)
			return result;

		ppp = get_ppp(dev);
		spin_lock_init(&ppp->lock);
		ppp->req_timeout = 2;
		ppp->cr_retries = 10;
		ppp->term_retries = 2;
		ppp->keepalive_interval = 10;
		ppp->keepalive_timeout = 60;

		dev->hard_header_len = sizeof(struct hdlc_header);
		dev->header_ops = &ppp_header_ops;
		dev->type = ARPHRD_PPP;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		netif_dormant_on(dev);
		return 0;
	}

	return -EINVAL;
}

static int __init hdlc_ppp_init(void)
{
	skb_queue_head_init(&tx_queue);
	register_hdlc_protocol(&proto);
	return 0;
}

static void __exit hdlc_ppp_exit(void)
{
	unregister_hdlc_protocol(&proto);
}

module_init(hdlc_ppp_init);
module_exit(hdlc_ppp_exit);

MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");