qeth_l3_main.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright IBM Corp. 2007, 2009
  4. * Author(s): Utz Bacher <[email protected]>,
  5. * Frank Pavlic <[email protected]>,
  6. * Thomas Spatzier <[email protected]>,
  7. * Frank Blaschka <[email protected]>
  8. */
  9. #define KMSG_COMPONENT "qeth"
  10. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  11. #include <linux/module.h>
  12. #include <linux/moduleparam.h>
  13. #include <linux/bitops.h>
  14. #include <linux/string.h>
  15. #include <linux/errno.h>
  16. #include <linux/kernel.h>
  17. #include <linux/etherdevice.h>
  18. #include <linux/ip.h>
  19. #include <linux/in.h>
  20. #include <linux/ipv6.h>
  21. #include <linux/inetdevice.h>
  22. #include <linux/igmp.h>
  23. #include <linux/slab.h>
  24. #include <linux/if_ether.h>
  25. #include <linux/if_vlan.h>
  26. #include <linux/skbuff.h>
  27. #include <net/ip.h>
  28. #include <net/arp.h>
  29. #include <net/route.h>
  30. #include <net/ipv6.h>
  31. #include <net/ip6_route.h>
  32. #include <net/iucv/af_iucv.h>
  33. #include <linux/hashtable.h>
  34. #include "qeth_l3.h"
  35. static int qeth_l3_register_addr_entry(struct qeth_card *,
  36. struct qeth_ipaddr *);
  37. static int qeth_l3_deregister_addr_entry(struct qeth_card *,
  38. struct qeth_ipaddr *);
  39. int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
  40. char *buf)
  41. {
  42. if (proto == QETH_PROT_IPV4)
  43. return sprintf(buf, "%pI4", addr);
  44. else
  45. return sprintf(buf, "%pI6", addr);
  46. }
  47. static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
  48. struct qeth_ipaddr *query)
  49. {
  50. u32 key = qeth_l3_ipaddr_hash(query);
  51. struct qeth_ipaddr *addr;
  52. if (query->is_multicast) {
  53. hash_for_each_possible(card->rx_mode_addrs, addr, hnode, key)
  54. if (qeth_l3_addr_match_ip(addr, query))
  55. return addr;
  56. } else {
  57. hash_for_each_possible(card->ip_htable, addr, hnode, key)
  58. if (qeth_l3_addr_match_ip(addr, query))
  59. return addr;
  60. }
  61. return NULL;
  62. }
  63. static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
  64. {
  65. int i, j;
  66. u8 octet;
  67. for (i = 0; i < len; ++i) {
  68. octet = addr[i];
  69. for (j = 7; j >= 0; --j) {
  70. bits[i*8 + j] = octet & 1;
  71. octet >>= 1;
  72. }
  73. }
  74. }
/* Check whether @addr falls into one of the configured IP-address-takeover
 * (IPATO) ranges, honouring the per-protocol invert flags.
 * Only NORMAL-type addresses are ever considered for takeover.
 * Returns true if the address is subject to takeover.
 */
static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
					     struct qeth_ipaddr *addr)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr_bits[128] = {0, };
	u8 ipatoe_bits[128] = {0, };
	int rc = 0;

	if (!card->ipato.enabled)
		return false;
	if (addr->type != QETH_IP_TYPE_NORMAL)
		return false;

	/* Expand the address to one byte per bit so that an arbitrary
	 * prefix length can be compared with memcmp() below.
	 */
	qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
				     (addr->proto == QETH_PROT_IPV4) ? 4 : 16);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (addr->proto != ipatoe->proto)
			continue;
		qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
					     (ipatoe->proto == QETH_PROT_IPV4) ?
					     4 : 16);
		/* Covered if the first mask_bits bits are identical: */
		rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
		if (rc)
			break;
	}
	/* invert? */
	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
		rc = !rc;
	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
		rc = !rc;

	return rc;
}
/* Drop one reference to @tmp_addr; once the last reference is gone the
 * address is deregistered from the card (if reachable) and freed.
 * Returns -ENOENT if no fully matching entry exists.
 * Caller must hold ip_lock (see qeth_l3_modify_ip()).
 */
static int qeth_l3_delete_ip(struct qeth_card *card,
			     struct qeth_ipaddr *tmp_addr)
{
	int rc = 0;
	struct qeth_ipaddr *addr;

	if (tmp_addr->type == QETH_IP_TYPE_RXIP)
		QETH_CARD_TEXT(card, 2, "delrxip");
	else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
		QETH_CARD_TEXT(card, 2, "delvipa");
	else
		QETH_CARD_TEXT(card, 2, "delip");

	if (tmp_addr->proto == QETH_PROT_IPV4)
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
	else {
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
	}

	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
		return -ENOENT;

	addr->ref_counter--;
	/* NORMAL addresses are refcounted; only remove them on last put: */
	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
		return rc;

	if (qeth_card_hw_is_reachable(card))
		rc = qeth_l3_deregister_addr_entry(card, addr);

	hash_del(&addr->hnode);
	kfree(addr);

	return rc;
}
/* Add @tmp_addr to the unicast IP table and register it with the card.
 * Adding an already-present NORMAL address just bumps its refcount;
 * any other collision fails with -EADDRINUSE. If the card is not
 * reachable, the entry is parked with QETH_DISP_ADDR_ADD so that
 * qeth_l3_recover_ip() registers it later.
 * Caller must hold ip_lock (see qeth_l3_modify_ip()).
 */
static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
	int rc = 0;
	struct qeth_ipaddr *addr;
	char buf[40];

	if (tmp_addr->type == QETH_IP_TYPE_RXIP)
		QETH_CARD_TEXT(card, 2, "addrxip");
	else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
		QETH_CARD_TEXT(card, 2, "addvipa");
	else
		QETH_CARD_TEXT(card, 2, "addip");

	if (tmp_addr->proto == QETH_PROT_IPV4)
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
	else {
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
	}

	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
	if (addr) {
		/* Same IP already present under a different type/attributes: */
		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
			return -EADDRINUSE;
		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
			addr->ref_counter++;
			return 0;
		}
		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
					 buf);
		dev_warn(&card->gdev->dev,
			 "Registering IP address %s failed\n", buf);
		return -EADDRINUSE;
	} else {
		addr = kmemdup(tmp_addr, sizeof(*tmp_addr), GFP_KERNEL);
		if (!addr)
			return -ENOMEM;

		if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
			QETH_CARD_TEXT(card, 2, "tkovaddr");
			addr->ipato = 1;
		}
		hash_add(card->ip_htable, &addr->hnode,
			 qeth_l3_ipaddr_hash(addr));

		/* Card offline: defer registration to qeth_l3_recover_ip(). */
		if (!qeth_card_hw_is_reachable(card)) {
			addr->disp_flag = QETH_DISP_ADDR_ADD;
			return 0;
		}

		rc = qeth_l3_register_addr_entry(card, addr);

		/* -EADDRINUSE/-ENETDOWN are tolerated: keep the entry. */
		if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
		} else {
			hash_del(&addr->hnode);
			kfree(addr);
		}
	}
	return rc;
}
  188. static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr,
  189. bool add)
  190. {
  191. int rc;
  192. mutex_lock(&card->ip_lock);
  193. rc = add ? qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr);
  194. mutex_unlock(&card->ip_lock);
  195. return rc;
  196. }
  197. static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card)
  198. {
  199. struct qeth_ipaddr *addr;
  200. struct hlist_node *tmp;
  201. int i;
  202. hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) {
  203. hash_del(&addr->hnode);
  204. kfree(addr);
  205. }
  206. }
/* Flush the unicast IP table.
 * @recover == 0: drop and free all entries.
 * @recover != 0: keep the entries but flag them QETH_DISP_ADDR_ADD so
 * that qeth_l3_recover_ip() re-registers them after the card comes back.
 */
static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
{
	struct qeth_ipaddr *addr;
	struct hlist_node *tmp;
	int i;

	QETH_CARD_TEXT(card, 4, "clearip");

	mutex_lock(&card->ip_lock);

	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
		if (!recover) {
			hash_del(&addr->hnode);
			kfree(addr);
			continue;
		}
		addr->disp_flag = QETH_DISP_ADDR_ADD;
	}

	mutex_unlock(&card->ip_lock);
}
/* Re-register all IP entries that were parked with QETH_DISP_ADDR_ADD
 * (by qeth_l3_add_ip() on an unreachable card, or by
 * qeth_l3_clear_ip_htable(recover=1)). Entries whose registration fails
 * are dropped from the table.
 */
static void qeth_l3_recover_ip(struct qeth_card *card)
{
	struct qeth_ipaddr *addr;
	struct hlist_node *tmp;
	int i;
	int rc;

	QETH_CARD_TEXT(card, 4, "recovrip");

	mutex_lock(&card->ip_lock);

	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
			rc = qeth_l3_register_addr_entry(card, addr);

			if (!rc) {
				addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
			} else {
				/* Registration failed: give up on this one. */
				hash_del(&addr->hnode);
				kfree(addr);
			}
		}
	}

	mutex_unlock(&card->ip_lock);
}
/* Completion callback for SETIP/DELIP/SETIPM/DELIPM commands:
 * translate the IPA return code into an errno.
 */
static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
			       unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_SUCCESS:
		return 0;
	case IPA_RC_DUPLICATE_IP_ADDRESS:
		return -EADDRINUSE;
	case IPA_RC_MC_ADDR_NOT_FOUND:
		return -ENOENT;
	case IPA_RC_LAN_OFFLINE:
		return -ENETDOWN;
	default:
		return -EIO;
	}
}
/* Issue a SETIPM or DELIPM command for the multicast address @addr.
 * The command carries both the IP and the mapped multicast MAC address;
 * an IPv4 address is placed in the last word of the IPv6-sized field.
 * Returns 0 on success or a negative errno (see qeth_l3_setdelip_cb()).
 */
static int qeth_l3_send_setdelmc(struct qeth_card *card,
				 struct qeth_ipaddr *addr,
				 enum qeth_ipa_cmds ipacmd)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "setdelmc");

	iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto,
				 IPA_DATA_SIZEOF(setdelipm));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	if (addr->proto == QETH_PROT_IPV6) {
		cmd->data.setdelipm.ip = addr->u.a6.addr;
		ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac);
	} else {
		cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr;
		ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac);
	}

	return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
  283. static void qeth_l3_set_ipv6_prefix(struct in6_addr *prefix, unsigned int len)
  284. {
  285. unsigned int i = 0;
  286. while (len && i < 4) {
  287. int mask_len = min_t(int, len, 32);
  288. prefix->s6_addr32[i] = inet_make_mask(mask_len);
  289. len -= mask_len;
  290. i++;
  291. }
  292. }
  293. static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set)
  294. {
  295. switch (addr->type) {
  296. case QETH_IP_TYPE_RXIP:
  297. return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
  298. case QETH_IP_TYPE_VIPA:
  299. return (set) ? QETH_IPA_SETIP_VIPA_FLAG :
  300. QETH_IPA_DELIP_VIPA_FLAG;
  301. default:
  302. return (set && addr->ipato) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0;
  303. }
  304. }
/* Issue a SETIP or DELIP command for the unicast address @addr,
 * including the prefix/netmask and the type-dependent flags.
 * Returns 0 on success or a negative errno (see qeth_l3_setdelip_cb()).
 */
static int qeth_l3_send_setdelip(struct qeth_card *card,
				 struct qeth_ipaddr *addr,
				 enum qeth_ipa_cmds ipacmd)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	u32 flags;

	QETH_CARD_TEXT(card, 4, "setdelip");

	iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto,
				 IPA_DATA_SIZEOF(setdelip6));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);

	flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP);
	QETH_CARD_TEXT_(card, 4, "flags%02X", flags);

	if (addr->proto == QETH_PROT_IPV6) {
		cmd->data.setdelip6.addr = addr->u.a6.addr;
		qeth_l3_set_ipv6_prefix(&cmd->data.setdelip6.prefix,
					addr->u.a6.pfxlen);
		cmd->data.setdelip6.flags = flags;
	} else {
		cmd->data.setdelip4.addr = addr->u.a4.addr;
		cmd->data.setdelip4.mask = addr->u.a4.mask;
		cmd->data.setdelip4.flags = flags;
	}

	return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
  332. static int qeth_l3_send_setrouting(struct qeth_card *card,
  333. enum qeth_routing_types type, enum qeth_prot_versions prot)
  334. {
  335. int rc;
  336. struct qeth_ipa_cmd *cmd;
  337. struct qeth_cmd_buffer *iob;
  338. QETH_CARD_TEXT(card, 4, "setroutg");
  339. iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETRTG, prot,
  340. IPA_DATA_SIZEOF(setrtg));
  341. if (!iob)
  342. return -ENOMEM;
  343. cmd = __ipa_cmd(iob);
  344. cmd->data.setrtg.type = (type);
  345. rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
  346. return rc;
  347. }
/* Validate the requested routing type against the card type.
 * IQD (HiperSockets) cards accept the connector/multicast-router types;
 * other cards accept the router types, with MULTICAST_ROUTER additionally
 * requiring the IPA_OSA_MC_ROUTER assist for @prot.
 * An unsupported type is reset to NO_ROUTER and -EINVAL is returned.
 */
static int qeth_l3_correct_routing_type(struct qeth_card *card,
					enum qeth_routing_types *type,
					enum qeth_prot_versions prot)
{
	if (IS_IQD(card)) {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_CONNECTOR:
		case SECONDARY_CONNECTOR:
		case MULTICAST_ROUTER:
			return 0;
		default:
			goto out_inval;
		}
	} else {
		switch (*type) {
		case NO_ROUTER:
		case PRIMARY_ROUTER:
		case SECONDARY_ROUTER:
			return 0;
		case MULTICAST_ROUTER:
			if (qeth_is_ipafunc_supported(card, prot,
						      IPA_OSA_MC_ROUTER))
				return 0;
			goto out_inval;
		default:
			goto out_inval;
		}
	}
out_inval:
	*type = NO_ROUTER;
	return -EINVAL;
}
/* Apply the configured IPv4 routing type to the card.
 * On failure the option falls back to NO_ROUTER.
 */
int qeth_l3_setrouting_v4(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "setrtg4");

	rc = qeth_l3_correct_routing_type(card, &card->options.route4.type,
					  QETH_PROT_IPV4);
	if (rc)
		return rc;

	rc = qeth_l3_send_setrouting(card, card->options.route4.type,
				     QETH_PROT_IPV4);
	if (rc) {
		card->options.route4.type = NO_ROUTER;
		QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
				 rc, CARD_DEVID(card));
	}
	return rc;
}
/* Apply the configured IPv6 routing type to the card.
 * Silently succeeds if the card lacks the IPv6 assist; on failure the
 * option falls back to NO_ROUTER.
 */
int qeth_l3_setrouting_v6(struct qeth_card *card)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "setrtg6");

	if (!qeth_is_supported(card, IPA_IPV6))
		return 0;
	rc = qeth_l3_correct_routing_type(card, &card->options.route6.type,
					  QETH_PROT_IPV6);
	if (rc)
		return rc;

	rc = qeth_l3_send_setrouting(card, card->options.route6.type,
				     QETH_PROT_IPV6);
	if (rc) {
		card->options.route6.type = NO_ROUTER;
		QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
				 rc, CARD_DEVID(card));
	}
	return rc;
}
  416. /*
  417. * IP address takeover related functions
  418. */
  419. /*
  420. * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
  421. *
  422. * Caller must hold ip_lock.
  423. */
  424. void qeth_l3_update_ipato(struct qeth_card *card)
  425. {
  426. struct qeth_ipaddr *addr;
  427. unsigned int i;
  428. hash_for_each(card->ip_htable, i, addr, hnode) {
  429. if (addr->type != QETH_IP_TYPE_NORMAL)
  430. continue;
  431. addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr);
  432. }
  433. }
  434. static void qeth_l3_clear_ipato_list(struct qeth_card *card)
  435. {
  436. struct qeth_ipato_entry *ipatoe, *tmp;
  437. mutex_lock(&card->ip_lock);
  438. list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
  439. list_del(&ipatoe->entry);
  440. kfree(ipatoe);
  441. }
  442. qeth_l3_update_ipato(card);
  443. mutex_unlock(&card->ip_lock);
  444. }
/* Append @new to the IPATO list, unless an entry with the same protocol,
 * address and mask length already exists (-EEXIST). On success the
 * takeover flags of all registered addresses are refreshed.
 * Ownership of @new transfers to the list only on success.
 */
int qeth_l3_add_ipato_entry(struct qeth_card *card,
			    struct qeth_ipato_entry *new)
{
	struct qeth_ipato_entry *ipatoe;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "addipato");

	mutex_lock(&card->ip_lock);

	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (ipatoe->proto != new->proto)
			continue;
		if (!memcmp(ipatoe->addr, new->addr,
			    (ipatoe->proto == QETH_PROT_IPV4) ? 4 : 16) &&
		    (ipatoe->mask_bits == new->mask_bits)) {
			rc = -EEXIST;
			break;
		}
	}

	if (!rc) {
		list_add_tail(&new->entry, &card->ipato.entries);
		qeth_l3_update_ipato(card);
	}

	mutex_unlock(&card->ip_lock);

	return rc;
}
/* Remove the IPATO entry matching @proto/@addr/@mask_bits, refreshing
 * the takeover flags of all registered addresses.
 * Returns 0 on success, -ENOENT if no such entry exists.
 */
int qeth_l3_del_ipato_entry(struct qeth_card *card,
			    enum qeth_prot_versions proto, u8 *addr,
			    unsigned int mask_bits)
{
	struct qeth_ipato_entry *ipatoe, *tmp;
	int rc = -ENOENT;

	QETH_CARD_TEXT(card, 2, "delipato");

	mutex_lock(&card->ip_lock);

	list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
		if (ipatoe->proto != proto)
			continue;
		if (!memcmp(ipatoe->addr, addr,
			    (proto == QETH_PROT_IPV4) ? 4 : 16) &&
		    (ipatoe->mask_bits == mask_bits)) {
			list_del(&ipatoe->entry);
			qeth_l3_update_ipato(card);
			kfree(ipatoe);
			rc = 0;
		}
	}

	mutex_unlock(&card->ip_lock);

	return rc;
}
  492. int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
  493. enum qeth_ip_types type,
  494. enum qeth_prot_versions proto)
  495. {
  496. struct qeth_ipaddr addr;
  497. qeth_l3_init_ipaddr(&addr, type, proto);
  498. if (proto == QETH_PROT_IPV4)
  499. memcpy(&addr.u.a4.addr, ip, 4);
  500. else
  501. memcpy(&addr.u.a6.addr, ip, 16);
  502. return qeth_l3_modify_ip(card, &addr, add);
  503. }
  504. int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
  505. {
  506. struct qeth_ipaddr addr;
  507. unsigned int i;
  508. qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
  509. addr.u.a6.addr.s6_addr[0] = 0xfe;
  510. addr.u.a6.addr.s6_addr[1] = 0x80;
  511. for (i = 0; i < 8; i++)
  512. addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i];
  513. return qeth_l3_modify_ip(card, &addr, add);
  514. }
/* Register @addr with the card, retrying up to three times. Multicast
 * addresses use SETIPM, unicast addresses SETIP. No-op in sniffer mode.
 * Emits a warning with the printable address if all attempts fail.
 */
static int qeth_l3_register_addr_entry(struct qeth_card *card,
				       struct qeth_ipaddr *addr)
{
	char buf[50];
	int rc = 0;
	int cnt = 3;

	if (card->options.sniffer)
		return 0;

	if (addr->proto == QETH_PROT_IPV4) {
		QETH_CARD_TEXT(card, 2, "setaddr4");
		QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
	} else if (addr->proto == QETH_PROT_IPV6) {
		QETH_CARD_TEXT(card, 2, "setaddr6");
		QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
	} else {
		QETH_CARD_TEXT(card, 2, "setaddr?");
		QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
	}
	do {
		if (addr->is_multicast)
			rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM);
		else
			rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP);
		if (rc)
			QETH_CARD_TEXT(card, 2, "failed");
	} while ((--cnt > 0) && rc);
	if (rc) {
		QETH_CARD_TEXT(card, 2, "FAILED");
		qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
		dev_warn(&card->gdev->dev,
			 "Registering IP address %s failed\n", buf);
	}
	return rc;
}
/* Deregister @addr from the card (single attempt, no retry). Multicast
 * addresses use DELIPM, unicast addresses DELIP. No-op in sniffer mode.
 */
static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
					 struct qeth_ipaddr *addr)
{
	int rc = 0;

	if (card->options.sniffer)
		return 0;

	if (addr->proto == QETH_PROT_IPV4) {
		QETH_CARD_TEXT(card, 2, "deladdr4");
		QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
	} else if (addr->proto == QETH_PROT_IPV6) {
		QETH_CARD_TEXT(card, 2, "deladdr6");
		QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
	} else {
		QETH_CARD_TEXT(card, 2, "deladdr?");
		QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
	}
	if (addr->is_multicast)
		rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
	else
		rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP);
	if (rc)
		QETH_CARD_TEXT(card, 2, "failed");

	return rc;
}
  575. static int qeth_l3_setadapter_parms(struct qeth_card *card)
  576. {
  577. int rc = 0;
  578. QETH_CARD_TEXT(card, 2, "setadprm");
  579. if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) {
  580. rc = qeth_setadpparms_change_macaddr(card);
  581. if (rc)
  582. dev_warn(&card->gdev->dev, "Reading the adapter MAC"
  583. " address failed\n");
  584. }
  585. return rc;
  586. }
  587. static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
  588. {
  589. int rc;
  590. QETH_CARD_TEXT(card, 3, "ipaarp");
  591. if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
  592. dev_info(&card->gdev->dev,
  593. "ARP processing not supported on %s!\n",
  594. netdev_name(card->dev));
  595. return 0;
  596. }
  597. rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
  598. IPA_CMD_ASS_START, NULL);
  599. if (rc) {
  600. dev_warn(&card->gdev->dev,
  601. "Starting ARP processing support for %s failed\n",
  602. netdev_name(card->dev));
  603. }
  604. return rc;
  605. }
  606. static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
  607. {
  608. int rc;
  609. QETH_CARD_TEXT(card, 3, "stsrcmac");
  610. if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
  611. dev_info(&card->gdev->dev,
  612. "Inbound source MAC-address not supported on %s\n",
  613. netdev_name(card->dev));
  614. return -EOPNOTSUPP;
  615. }
  616. rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC,
  617. IPA_CMD_ASS_START, NULL);
  618. if (rc)
  619. dev_warn(&card->gdev->dev,
  620. "Starting source MAC-address support for %s failed\n",
  621. netdev_name(card->dev));
  622. return rc;
  623. }
  624. static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
  625. {
  626. int rc = 0;
  627. QETH_CARD_TEXT(card, 3, "strtvlan");
  628. if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
  629. dev_info(&card->gdev->dev,
  630. "VLAN not supported on %s\n", netdev_name(card->dev));
  631. return -EOPNOTSUPP;
  632. }
  633. rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO,
  634. IPA_CMD_ASS_START, NULL);
  635. if (rc) {
  636. dev_warn(&card->gdev->dev,
  637. "Starting VLAN support for %s failed\n",
  638. netdev_name(card->dev));
  639. } else {
  640. dev_info(&card->gdev->dev, "VLAN enabled\n");
  641. }
  642. return rc;
  643. }
  644. static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
  645. {
  646. int rc;
  647. QETH_CARD_TEXT(card, 3, "stmcast");
  648. if (!qeth_is_supported(card, IPA_MULTICASTING)) {
  649. dev_info(&card->gdev->dev,
  650. "Multicast not supported on %s\n",
  651. netdev_name(card->dev));
  652. return -EOPNOTSUPP;
  653. }
  654. rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING,
  655. IPA_CMD_ASS_START, NULL);
  656. if (rc) {
  657. dev_warn(&card->gdev->dev,
  658. "Starting multicast support for %s failed\n",
  659. netdev_name(card->dev));
  660. } else {
  661. dev_info(&card->gdev->dev, "Multicast enabled\n");
  662. card->dev->flags |= IFF_MULTICAST;
  663. }
  664. return rc;
  665. }
/* Activate IPv6 support: start the IPv6 assist for v4 and v6, then
 * enable passthrough mode. IQD (HiperSockets) cards skip the command
 * sequence entirely.
 */
static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
	/* NOTE(review): data value 3 passed to IPA_IPV6 start — meaning not
	 * derivable from this file; confirm against the IPA assist spec.
	 */
	u32 ipv6_data = 3;
	int rc;

	QETH_CARD_TEXT(card, 3, "softipv6");

	if (IS_IQD(card))
		goto out;

	rc = qeth_send_simple_setassparms(card, IPA_IPV6, IPA_CMD_ASS_START,
					  &ipv6_data);
	if (rc) {
		dev_err(&card->gdev->dev,
			"Activating IPv6 support for %s failed\n",
			netdev_name(card->dev));
		return rc;
	}
	rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, IPA_CMD_ASS_START,
					     NULL);
	if (rc) {
		dev_err(&card->gdev->dev,
			"Activating IPv6 support for %s failed\n",
			 netdev_name(card->dev));
		return rc;
	}
	rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
					     IPA_CMD_ASS_START, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "Enabling the passthrough mode for %s failed\n",
			 netdev_name(card->dev));
		return rc;
	}
out:
	dev_info(&card->gdev->dev, "IPV6 enabled\n");
	return 0;
}
  701. static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
  702. {
  703. QETH_CARD_TEXT(card, 3, "strtipv6");
  704. if (!qeth_is_supported(card, IPA_IPV6)) {
  705. dev_info(&card->gdev->dev,
  706. "IPv6 not supported on %s\n", netdev_name(card->dev));
  707. return 0;
  708. }
  709. return qeth_l3_softsetup_ipv6(card);
  710. }
/* Enable broadcast support in stages: start the filtering assist,
 * configure it, then enable echo filtering. broadcast_capable tracks
 * how far we got (WITH_ECHO after configure, WITHOUT_ECHO after the
 * final enable) and IFF_BROADCAST is set/cleared to match.
 */
static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
{
	u32 filter_data = 1;
	int rc;

	QETH_CARD_TEXT(card, 3, "stbrdcst");
	card->info.broadcast_capable = 0;
	if (!qeth_is_supported(card, IPA_FILTERING)) {
		dev_info(&card->gdev->dev,
			 "Broadcast not supported on %s\n",
			 netdev_name(card->dev));
		rc = -EOPNOTSUPP;
		goto out;
	}
	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_START, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "Enabling broadcast filtering for %s failed\n",
			 netdev_name(card->dev));
		goto out;
	}

	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_CONFIGURE, &filter_data);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "Setting up broadcast filtering for %s failed\n",
			 netdev_name(card->dev));
		goto out;
	}
	card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
	dev_info(&card->gdev->dev, "Broadcast enabled\n");
	rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
					  IPA_CMD_ASS_ENABLE, &filter_data);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "Setting up broadcast echo filtering for %s failed\n",
			 netdev_name(card->dev));
		goto out;
	}
	card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
out:
	if (card->info.broadcast_capable)
		card->dev->flags |= IFF_BROADCAST;
	else
		card->dev->flags &= ~IFF_BROADCAST;

	return rc;
}
/* Start all IP assists. Each step logs its own failure; errors are
 * intentionally ignored here so the remaining assists still get started.
 */
static void qeth_l3_start_ipassists(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "strtipas");

	qeth_l3_start_ipa_arp_processing(card);	/* go on*/
	qeth_l3_start_ipa_source_mac(card);	/* go on*/
	qeth_l3_start_ipa_vlan(card);		/* go on*/
	qeth_l3_start_ipa_multicast(card);	/* go on*/
	qeth_l3_start_ipa_ipv6(card);		/* go on*/
	qeth_l3_start_ipa_broadcast(card);	/* go on*/
}
  768. static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
  769. struct qeth_reply *reply, unsigned long data)
  770. {
  771. struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
  772. if (cmd->hdr.return_code)
  773. return -EIO;
  774. if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr))
  775. return -EADDRNOTAVAIL;
  776. eth_hw_addr_set(card->dev, cmd->data.create_destroy_addr.mac_addr);
  777. return 0;
  778. }
  779. static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
  780. {
  781. int rc = 0;
  782. struct qeth_cmd_buffer *iob;
  783. QETH_CARD_TEXT(card, 2, "hsrmac");
  784. iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
  785. IPA_DATA_SIZEOF(create_destroy_addr));
  786. if (!iob)
  787. return -ENOMEM;
  788. rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
  789. NULL);
  790. return rc;
  791. }
  792. static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
  793. struct qeth_reply *reply, unsigned long data)
  794. {
  795. struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
  796. u16 *uid = reply->param;
  797. if (cmd->hdr.return_code == 0) {
  798. *uid = cmd->data.create_destroy_addr.uid;
  799. return 0;
  800. }
  801. dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n");
  802. return -EIO;
  803. }
/* Ask the card for a unique interface ID (fed into dev->dev_id for IPv6
 * address autoconfiguration). Best-effort: on any failure the passed-in
 * uid is returned unchanged; on success the callback overwrites it.
 */
static u16 qeth_l3_get_unique_id(struct qeth_card *card, u16 uid)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "guniqeid");

	if (!qeth_is_supported(card, IPA_IPV6))
		goto out;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
				 IPA_DATA_SIZEOF(create_destroy_addr));
	if (!iob)
		goto out;

	__ipa_cmd(iob)->data.create_destroy_addr.uid = uid;
	/* &uid is filled in by qeth_l3_get_unique_id_cb on success */
	qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, &uid);
out:
	return uid;
}
/* Reply callback for QETH_DIAGS_CMD_TRACE commands: track the HiperSockets
 * traffic-analyzer state in card->info.promisc_mode and report the outcome
 * to the kernel log. Returns 0 on success, -EIO on any IPA error.
 */
static int
qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
		    unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	__u16 rc;

	QETH_CARD_TEXT(card, 2, "diastrcb");

	cmd = (struct qeth_ipa_cmd *)data;
	rc = cmd->hdr.return_code;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "dxter%x", rc);
	switch (cmd->data.diagass.action) {
	case QETH_DIAGS_CMD_TRACE_QUERY:
		break;
	case QETH_DIAGS_CMD_TRACE_DISABLE:
		switch (rc) {
		case 0:
		case IPA_RC_INVALID_SUBCMD:
			/* NOTE(review): INVALID_SUBCMD is treated like
			 * success here - presumably because a card without
			 * trace support cannot have tracing active; confirm
			 */
			card->info.promisc_mode = SET_PROMISC_MODE_OFF;
			dev_info(&card->gdev->dev, "The HiperSockets network "
				 "traffic analyzer is deactivated\n");
			break;
		default:
			break;
		}
		break;
	case QETH_DIAGS_CMD_TRACE_ENABLE:
		switch (rc) {
		case 0:
			card->info.promisc_mode = SET_PROMISC_MODE_ON;
			dev_info(&card->gdev->dev, "The HiperSockets network "
				 "traffic analyzer is activated\n");
			break;
		case IPA_RC_HARDWARE_AUTH_ERROR:
			dev_warn(&card->gdev->dev, "The device is not "
				 "authorized to run as a HiperSockets network "
				 "traffic analyzer\n");
			break;
		case IPA_RC_TRACE_ALREADY_ACTIVE:
			dev_warn(&card->gdev->dev, "A HiperSockets "
				 "network traffic analyzer is already "
				 "active in the HiperSockets LAN\n");
			break;
		default:
			break;
		}
		break;
	default:
		QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n",
				 cmd->data.diagass.action, CARD_DEVID(card));
	}

	return rc ? -EIO : 0;
}
  872. static int
  873. qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
  874. {
  875. struct qeth_cmd_buffer *iob;
  876. struct qeth_ipa_cmd *cmd;
  877. QETH_CARD_TEXT(card, 2, "diagtrac");
  878. iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRACE, 0);
  879. if (!iob)
  880. return -ENOMEM;
  881. cmd = __ipa_cmd(iob);
  882. cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
  883. cmd->data.diagass.action = diags_cmd;
  884. return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
  885. }
/* Walk a netdev's IPv4 and IPv6 multicast lists (caller holds RTNL) and
 * stage each address in card->rx_mode_addrs for qeth_l3_rx_mode_work():
 * addresses already present are marked DO_NOTHING, new ones are hashed in
 * with disp_flag ADD. Used for the base device and, via vlan_for_each(),
 * for its VLAN devices. Always returns 0 so iteration continues.
 */
static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg)
{
	struct qeth_card *card = arg;
	struct inet6_dev *in6_dev;
	struct in_device *in4_dev;
	struct qeth_ipaddr *ipm;
	struct qeth_ipaddr tmp;
	struct ip_mc_list *im4;
	struct ifmcaddr6 *im6;

	QETH_CARD_TEXT(card, 4, "addmc");

	if (!dev || !(dev->flags & IFF_UP))
		goto out;

	in4_dev = __in_dev_get_rtnl(dev);
	if (!in4_dev)
		goto walk_ipv6;

	/* tmp serves as a template and lookup key for each list entry */
	qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
	tmp.disp_flag = QETH_DISP_ADDR_ADD;
	tmp.is_multicast = 1;

	for (im4 = rtnl_dereference(in4_dev->mc_list); im4 != NULL;
	     im4 = rtnl_dereference(im4->next_rcu)) {
		tmp.u.a4.addr = im4->multiaddr;

		ipm = qeth_l3_find_addr_by_ip(card, &tmp);
		if (ipm) {
			/* for mcast, by-IP match means full match */
			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
			continue;
		}

		ipm = kmemdup(&tmp, sizeof(tmp), GFP_KERNEL);
		if (!ipm)
			continue;

		hash_add(card->rx_mode_addrs, &ipm->hnode,
			 qeth_l3_ipaddr_hash(ipm));
	}

walk_ipv6:
	if (!qeth_is_supported(card, IPA_IPV6))
		goto out;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
	tmp.disp_flag = QETH_DISP_ADDR_ADD;
	tmp.is_multicast = 1;

	for (im6 = rtnl_dereference(in6_dev->mc_list);
	     im6;
	     im6 = rtnl_dereference(im6->next)) {
		tmp.u.a6.addr = im6->mca_addr;

		ipm = qeth_l3_find_addr_by_ip(card, &tmp);
		if (ipm) {
			/* for mcast, by-IP match means full match */
			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
			continue;
		}

		/* NOTE(review): GFP_ATOMIC here vs GFP_KERNEL in the IPv4
		 * loop above looks inconsistent; both loops run under RTNL -
		 * confirm whether the atomic allocation is still required
		 */
		ipm = kmemdup(&tmp, sizeof(tmp), GFP_ATOMIC);
		if (!ipm)
			continue;

		hash_add(card->rx_mode_addrs, &ipm->hnode,
			 qeth_l3_ipaddr_hash(ipm));
	}

out:
	return 0;
}
  947. static void qeth_l3_set_promisc_mode(struct qeth_card *card)
  948. {
  949. bool enable = card->dev->flags & IFF_PROMISC;
  950. if (card->info.promisc_mode == enable)
  951. return;
  952. if (IS_VM_NIC(card)) { /* Guestlan trace */
  953. if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
  954. qeth_setadp_promisc_mode(card, enable);
  955. } else if (card->options.sniffer && /* HiperSockets trace */
  956. qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
  957. if (enable) {
  958. QETH_CARD_TEXT(card, 3, "+promisc");
  959. qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
  960. } else {
  961. QETH_CARD_TEXT(card, 3, "-promisc");
  962. qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
  963. }
  964. }
  965. }
/* Worker behind ndo_set_rx_mode: re-scan the device's multicast lists,
 * then deregister stale addresses and register newly staged ones with the
 * card, and finally sync the promiscuous mode. Runs in process context so
 * the (sleeping) IPA commands are allowed.
 */
static void qeth_l3_rx_mode_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(work, struct qeth_card,
					      rx_mode_work);
	struct qeth_ipaddr *addr;
	struct hlist_node *tmp;
	int i, rc;

	QETH_CARD_TEXT(card, 3, "setmulti");

	if (!card->options.sniffer) {
		/* stage the current mcast addresses under RTNL */
		rtnl_lock();
		qeth_l3_add_mcast_rtnl(card->dev, 0, card);
		if (qeth_is_supported(card, IPA_FULL_VLAN))
			vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card);
		rtnl_unlock();

		hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) {
			switch (addr->disp_flag) {
			case QETH_DISP_ADDR_DELETE:
				/* not re-staged above: remove from the card */
				rc = qeth_l3_deregister_addr_entry(card, addr);
				if (!rc || rc == -ENOENT) {
					hash_del(&addr->hnode);
					kfree(addr);
				}
				break;
			case QETH_DISP_ADDR_ADD:
				rc = qeth_l3_register_addr_entry(card, addr);
				if (rc && rc != -ENETDOWN) {
					/* -ENETDOWN entries are kept for a
					 * retry on the next pass
					 */
					hash_del(&addr->hnode);
					kfree(addr);
					break;
				}
				fallthrough;
			default:
				/* for next call to set_rx_mode(): */
				addr->disp_flag = QETH_DISP_ADDR_DELETE;
			}
		}
	}

	qeth_l3_set_promisc_mode(card);
}
  1005. static int qeth_l3_arp_makerc(u16 rc)
  1006. {
  1007. switch (rc) {
  1008. case IPA_RC_SUCCESS:
  1009. return 0;
  1010. case QETH_IPA_ARP_RC_NOTSUPP:
  1011. case QETH_IPA_ARP_RC_Q_NOTSUPP:
  1012. return -EOPNOTSUPP;
  1013. case QETH_IPA_ARP_RC_OUT_OF_RANGE:
  1014. return -EINVAL;
  1015. case QETH_IPA_ARP_RC_Q_NO_DATA:
  1016. return -ENOENT;
  1017. default:
  1018. return -EIO;
  1019. }
  1020. }
  1021. static int qeth_l3_arp_cmd_cb(struct qeth_card *card, struct qeth_reply *reply,
  1022. unsigned long data)
  1023. {
  1024. struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
  1025. qeth_setassparms_cb(card, reply, data);
  1026. return qeth_l3_arp_makerc(cmd->hdr.return_code);
  1027. }
  1028. static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
  1029. {
  1030. struct qeth_cmd_buffer *iob;
  1031. int rc;
  1032. QETH_CARD_TEXT(card, 3, "arpstnoe");
  1033. /*
  1034. * currently GuestLAN only supports the ARP assist function
  1035. * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
  1036. * thus we say EOPNOTSUPP for this ARP function
  1037. */
  1038. if (IS_VM_NIC(card))
  1039. return -EOPNOTSUPP;
  1040. if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
  1041. return -EOPNOTSUPP;
  1042. }
  1043. iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
  1044. IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
  1045. SETASS_DATA_SIZEOF(flags_32bit),
  1046. QETH_PROT_IPV4);
  1047. if (!iob)
  1048. return -ENOMEM;
  1049. __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (u32) no_entries;
  1050. rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
  1051. if (rc)
  1052. QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
  1053. CARD_DEVID(card), rc);
  1054. return rc;
  1055. }
  1056. static __u32 get_arp_entry_size(struct qeth_card *card,
  1057. struct qeth_arp_query_data *qdata,
  1058. struct qeth_arp_entrytype *type, __u8 strip_entries)
  1059. {
  1060. __u32 rc;
  1061. __u8 is_hsi;
  1062. is_hsi = qdata->reply_bits == 5;
  1063. if (type->ip == QETHARP_IP_ADDR_V4) {
  1064. QETH_CARD_TEXT(card, 4, "arpev4");
  1065. if (strip_entries) {
  1066. rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) :
  1067. sizeof(struct qeth_arp_qi_entry7_short);
  1068. } else {
  1069. rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) :
  1070. sizeof(struct qeth_arp_qi_entry7);
  1071. }
  1072. } else if (type->ip == QETHARP_IP_ADDR_V6) {
  1073. QETH_CARD_TEXT(card, 4, "arpev6");
  1074. if (strip_entries) {
  1075. rc = is_hsi ?
  1076. sizeof(struct qeth_arp_qi_entry5_short_ipv6) :
  1077. sizeof(struct qeth_arp_qi_entry7_short_ipv6);
  1078. } else {
  1079. rc = is_hsi ?
  1080. sizeof(struct qeth_arp_qi_entry5_ipv6) :
  1081. sizeof(struct qeth_arp_qi_entry7_ipv6);
  1082. }
  1083. } else {
  1084. QETH_CARD_TEXT(card, 4, "arpinv");
  1085. rc = 0;
  1086. }
  1087. return rc;
  1088. }
  1089. static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot)
  1090. {
  1091. return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) ||
  1092. (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6);
  1093. }
/* Reply callback for IPA_CMD_ASS_ARP_QUERY_INFO: append the ARP entries
 * from this reply part to the qeth_arp_query_info buffer (reply->param),
 * optionally stripping the media-specific prefix from each entry.
 * Returns 1 while more reply parts are outstanding, 0 when complete, or a
 * negative errno on error.
 */
static int qeth_l3_arp_query_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_data *qdata;
	struct qeth_arp_query_info *qinfo;
	int e;
	int entrybytes_done;
	int stripped_bytes;
	__u8 do_strip_entries;

	QETH_CARD_TEXT(card, 3, "arpquecb");

	qinfo = (struct qeth_arp_query_info *)reply->param;
	cmd = (struct qeth_ipa_cmd *)data;
	QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version);
	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT(card, 4, "arpcberr");
		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
		return qeth_l3_arp_makerc(cmd->hdr.return_code);
	}
	if (cmd->data.setassparms.hdr.return_code) {
		/* propagate the assist-level error to the IPA header */
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
		QETH_CARD_TEXT(card, 4, "setaperr");
		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
		return qeth_l3_arp_makerc(cmd->hdr.return_code);
	}
	qdata = &cmd->data.setassparms.data.query_arp;
	QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);

	do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0;
	stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0;
	/* entrybytes_done tracks our position in the reply's entry array,
	 * qinfo->udata_offset the position in the user-visible buffer
	 */
	entrybytes_done = 0;
	for (e = 0; e < qdata->no_entries; ++e) {
		char *cur_entry;
		__u32 esize;
		struct qeth_arp_entrytype *etype;

		cur_entry = &qdata->data + entrybytes_done;
		etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type;
		if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) {
			/* stop at the first entry of the wrong family */
			QETH_CARD_TEXT(card, 4, "pmis");
			QETH_CARD_TEXT_(card, 4, "%i", etype->ip);
			break;
		}
		esize = get_arp_entry_size(card, qdata, etype,
					   do_strip_entries);
		QETH_CARD_TEXT_(card, 5, "esz%i", esize);
		if (!esize)
			break;

		if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
			QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOSPC);
			/* zero the entry count at the buffer's front */
			memset(qinfo->udata, 0, 4);
			return -ENOSPC;
		}

		/* copy the entry without its stripped prefix bytes */
		memcpy(qinfo->udata + qinfo->udata_offset,
		       &qdata->data + entrybytes_done + stripped_bytes,
		       esize);
		entrybytes_done += esize + stripped_bytes;
		qinfo->udata_offset += esize;
		++qinfo->no_entries;
	}
	/* check if all replies received ... */
	if (cmd->data.setassparms.hdr.seq_no <
	    cmd->data.setassparms.hdr.number_of_replies)
		return 1;
	QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries);
	/* total entry count goes to the front of the user buffer ... */
	memcpy(qinfo->udata, &qinfo->no_entries, 4);
	/* keep STRIP_ENTRIES flag so the user program can distinguish
	 * stripped entries from normal ones */
	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
	/* ... followed by the reply bits */
	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
	QETH_CARD_TEXT_(card, 4, "rc%i", 0);
	return 0;
}
  1166. static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
  1167. enum qeth_prot_versions prot,
  1168. struct qeth_arp_query_info *qinfo)
  1169. {
  1170. struct qeth_cmd_buffer *iob;
  1171. struct qeth_ipa_cmd *cmd;
  1172. int rc;
  1173. QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
  1174. iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
  1175. IPA_CMD_ASS_ARP_QUERY_INFO,
  1176. SETASS_DATA_SIZEOF(query_arp), prot);
  1177. if (!iob)
  1178. return -ENOMEM;
  1179. cmd = __ipa_cmd(iob);
  1180. cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
  1181. rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_query_cb, qinfo);
  1182. if (rc)
  1183. QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
  1184. CARD_DEVID(card), rc);
  1185. return rc;
  1186. }
  1187. static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
  1188. {
  1189. struct qeth_arp_query_info qinfo = {0, };
  1190. int rc;
  1191. QETH_CARD_TEXT(card, 3, "arpquery");
  1192. if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
  1193. IPA_ARP_PROCESSING)) {
  1194. QETH_CARD_TEXT(card, 3, "arpqnsup");
  1195. rc = -EOPNOTSUPP;
  1196. goto out;
  1197. }
  1198. /* get size of userspace buffer and mask_bits -> 6 bytes */
  1199. if (copy_from_user(&qinfo, udata, 6)) {
  1200. rc = -EFAULT;
  1201. goto out;
  1202. }
  1203. qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
  1204. if (!qinfo.udata) {
  1205. rc = -ENOMEM;
  1206. goto out;
  1207. }
  1208. qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
  1209. rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo);
  1210. if (rc) {
  1211. if (copy_to_user(udata, qinfo.udata, 4))
  1212. rc = -EFAULT;
  1213. goto free_and_out;
  1214. }
  1215. if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
  1216. /* fails in case of GuestLAN QDIO mode */
  1217. qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
  1218. }
  1219. if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
  1220. QETH_CARD_TEXT(card, 4, "qactf");
  1221. rc = -EFAULT;
  1222. goto free_and_out;
  1223. }
  1224. QETH_CARD_TEXT(card, 4, "qacts");
  1225. free_and_out:
  1226. kfree(qinfo.udata);
  1227. out:
  1228. return rc;
  1229. }
  1230. static int qeth_l3_arp_modify_entry(struct qeth_card *card,
  1231. struct qeth_arp_cache_entry *entry,
  1232. enum qeth_arp_process_subcmds arp_cmd)
  1233. {
  1234. struct qeth_arp_cache_entry *cmd_entry;
  1235. struct qeth_cmd_buffer *iob;
  1236. int rc;
  1237. if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY)
  1238. QETH_CARD_TEXT(card, 3, "arpadd");
  1239. else
  1240. QETH_CARD_TEXT(card, 3, "arpdel");
  1241. /*
  1242. * currently GuestLAN only supports the ARP assist function
  1243. * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
  1244. * thus we say EOPNOTSUPP for this ARP function
  1245. */
  1246. if (IS_VM_NIC(card))
  1247. return -EOPNOTSUPP;
  1248. if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
  1249. return -EOPNOTSUPP;
  1250. }
  1251. iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
  1252. SETASS_DATA_SIZEOF(arp_entry),
  1253. QETH_PROT_IPV4);
  1254. if (!iob)
  1255. return -ENOMEM;
  1256. cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
  1257. ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
  1258. memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
  1259. rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
  1260. if (rc)
  1261. QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
  1262. arp_cmd, CARD_DEVID(card), rc);
  1263. return rc;
  1264. }
  1265. static int qeth_l3_arp_flush_cache(struct qeth_card *card)
  1266. {
  1267. struct qeth_cmd_buffer *iob;
  1268. int rc;
  1269. QETH_CARD_TEXT(card, 3, "arpflush");
  1270. /*
  1271. * currently GuestLAN only supports the ARP assist function
  1272. * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
  1273. * thus we say EOPNOTSUPP for this ARP function
  1274. */
  1275. if (IS_VM_NIC(card) || IS_IQD(card))
  1276. return -EOPNOTSUPP;
  1277. if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
  1278. return -EOPNOTSUPP;
  1279. }
  1280. iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
  1281. IPA_CMD_ASS_ARP_FLUSH_CACHE, 0,
  1282. QETH_PROT_IPV4);
  1283. if (!iob)
  1284. return -ENOMEM;
  1285. rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL);
  1286. if (rc)
  1287. QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
  1288. CARD_DEVID(card), rc);
  1289. return rc;
  1290. }
/* ndo_siocdevprivate callback: dispatch the qeth-private ARP ioctls and
 * delegate everything else to the common qeth handler. All ARP subcommands
 * require CAP_NET_ADMIN.
 */
static int qeth_l3_ndo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
				      void __user *data, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_arp_cache_entry arp_entry;
	enum qeth_arp_process_subcmds arp_cmd;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ARP_SET_NO_ENTRIES:
		if (!capable(CAP_NET_ADMIN)) {
			rc = -EPERM;
			break;
		}
		/* new cache size is passed inline in the ifreq */
		rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
		break;
	case SIOC_QETH_ARP_QUERY_INFO:
		if (!capable(CAP_NET_ADMIN)) {
			rc = -EPERM;
			break;
		}
		rc = qeth_l3_arp_query(card, data);
		break;
	case SIOC_QETH_ARP_ADD_ENTRY:
	case SIOC_QETH_ARP_REMOVE_ENTRY:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&arp_entry, data, sizeof(arp_entry)))
			return -EFAULT;
		arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
				IPA_CMD_ASS_ARP_ADD_ENTRY :
				IPA_CMD_ASS_ARP_REMOVE_ENTRY;
		return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd);
	case SIOC_QETH_ARP_FLUSH_CACHE:
		if (!capable(CAP_NET_ADMIN)) {
			rc = -EPERM;
			break;
		}
		rc = qeth_l3_arp_flush_cache(card);
		break;
	default:
		rc = qeth_siocdevprivate(dev, rq, data, cmd);
	}
	return rc;
}
  1335. static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst,
  1336. __be16 proto)
  1337. {
  1338. struct neighbour *n = NULL;
  1339. if (dst)
  1340. n = dst_neigh_lookup_skb(dst, skb);
  1341. if (n) {
  1342. int cast_type = n->type;
  1343. neigh_release(n);
  1344. if ((cast_type == RTN_BROADCAST) ||
  1345. (cast_type == RTN_MULTICAST) ||
  1346. (cast_type == RTN_ANYCAST))
  1347. return cast_type;
  1348. return RTN_UNICAST;
  1349. }
  1350. /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
  1351. switch (proto) {
  1352. case htons(ETH_P_IP):
  1353. if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
  1354. return RTN_BROADCAST;
  1355. return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
  1356. RTN_MULTICAST : RTN_UNICAST;
  1357. case htons(ETH_P_IPV6):
  1358. return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
  1359. RTN_MULTICAST : RTN_UNICAST;
  1360. case htons(ETH_P_AF_IUCV):
  1361. return RTN_UNICAST;
  1362. default:
  1363. /* OSA only: ... and MAC address */
  1364. return qeth_get_ether_cast_type(skb);
  1365. }
  1366. }
  1367. static int qeth_l3_get_cast_type(struct sk_buff *skb, __be16 proto)
  1368. {
  1369. struct dst_entry *dst;
  1370. int cast_type;
  1371. rcu_read_lock();
  1372. dst = qeth_dst_check_rcu(skb, proto);
  1373. cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
  1374. rcu_read_unlock();
  1375. return cast_type;
  1376. }
  1377. static u8 qeth_l3_cast_type_to_flag(int cast_type)
  1378. {
  1379. if (cast_type == RTN_MULTICAST)
  1380. return QETH_CAST_MULTICAST;
  1381. if (cast_type == RTN_ANYCAST)
  1382. return QETH_CAST_ANYCAST;
  1383. if (cast_type == RTN_BROADCAST)
  1384. return QETH_CAST_BROADCAST;
  1385. return QETH_CAST_UNICAST;
  1386. }
/* Build the qeth L3 HW header for an outgoing skb: length, TSO/csum flags,
 * VLAN info, cast-type flags and the next-hop address. Takes the RCU read
 * lock around the dst/neighbour inspection.
 */
static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
				struct qeth_hdr *hdr, struct sk_buff *skb,
				__be16 proto, unsigned int data_len)
{
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	struct qeth_card *card = queue->card;
	struct dst_entry *dst;
	int cast_type;

	hdr->hdr.l3.length = data_len;

	if (skb_is_gso(skb)) {
		hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO;
	} else {
		hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, proto);
			/* some HW requires combined L3+L4 csum offload: */
			if (proto == htons(ETH_P_IP))
				hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
		}
	}

	if (proto == htons(ETH_P_IP) || IS_IQD(card)) {
		/* NETIF_F_HW_VLAN_CTAG_TX */
		if (skb_vlan_tag_present(skb)) {
			hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME;
			hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
		}
	} else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
		/* VLAN tag is still inline in the frame data */
		hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG;
		hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI);
	}

	rcu_read_lock();
	dst = qeth_dst_check_rcu(skb, proto);

	/* anything mapped to a non-mcast TXQ on IQD is treated as unicast */
	if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ)
		cast_type = RTN_UNICAST;
	else
		cast_type = qeth_l3_get_cast_type_rcu(skb, dst, proto);
	l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type);

	switch (proto) {
	case htons(ETH_P_IP):
		/* v4 next hop lives in the last word of the v6 addr field */
		l3_hdr->next_hop.addr.s6_addr32[3] =
					qeth_next_hop_v4_rcu(skb, dst);
		break;
	case htons(ETH_P_IPV6):
		l3_hdr->next_hop.addr = *qeth_next_hop_v6_rcu(skb, dst);

		hdr->hdr.l3.flags |= QETH_HDR_IPV6;
		if (!IS_IQD(card))
			hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
		break;
	case htons(ETH_P_AF_IUCV):
		/* AF_IUCV: synthesize a link-local v6 address from the
		 * IUCV destination user ID
		 */
		l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80);
		memcpy(&l3_hdr->next_hop.addr.s6_addr32[2],
		       iucv_trans_hdr(skb)->destUserID, 8);
		l3_hdr->flags |= QETH_HDR_IPV6;
		break;
	default:
		/* OSA only: */
		l3_hdr->flags |= QETH_HDR_PASSTHRU;
	}
	rcu_read_unlock();
}
/* Prepare an outgoing IPv4 skb's headers for HW offload: clear the IP
 * checksum for csum offload, and for GSO clear tot_len and seed the TCP
 * pseudo-header checksum.
 * NOTE(review): presumably the TSO/csum hardware fills in the final
 * values - confirm against the qeth HW interface.
 */
static void qeth_l3_fixup_headers(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	/* this is safe, IPv6 traffic takes a different path */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		iph->check = 0;
	if (skb_is_gso(skb)) {
		iph->tot_len = 0;
		tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr,
						    iph->daddr, 0);
	}
}
/* Transmit one skb on the L3 path that needs header fixups: strip the
 * ETH header and re-use its room for the qeth HW header, then hand the
 * skb to the generic qeth transmit code.
 */
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
			struct qeth_qdio_out_q *queue, __be16 proto)
{
	unsigned int hw_hdr_len;
	int rc;

	/* re-use the L2 header area for the HW header: */
	hw_hdr_len = skb_is_gso(skb) ? sizeof(struct qeth_hdr_tso) :
				       sizeof(struct qeth_hdr);
	rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
	if (rc)
		return rc;
	skb_pull(skb, ETH_HLEN);

	qeth_l3_fixup_headers(skb);
	return qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);
}
/* ndo_start_xmit callback: validate the packet against the device state
 * (IQD restrictions, broadcast capability), pick the TX queue and hand the
 * skb to the qeth transmit path. Invalid packets are dropped and counted,
 * never rejected back to the stack.
 */
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	__be16 proto = vlan_get_protocol(skb);
	u16 txq = skb_get_queue_mapping(skb);
	struct qeth_qdio_out_q *queue;
	int rc;

	if (!skb_is_gso(skb))
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	if (IS_IQD(card)) {
		queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];

		/* a sniffer device never transmits */
		if (card->options.sniffer)
			goto tx_drop;

		/* AF_IUCV requires the CQ; IP traffic must avoid it */
		switch (proto) {
		case htons(ETH_P_AF_IUCV):
			if (card->options.cq != QETH_CQ_ENABLED)
				goto tx_drop;
			break;
		case htons(ETH_P_IP):
		case htons(ETH_P_IPV6):
			if (card->options.cq == QETH_CQ_ENABLED)
				goto tx_drop;
			break;
		default:
			goto tx_drop;
		}
	} else {
		queue = card->qdio.out_qs[txq];
	}

	if (!(dev->flags & IFF_BROADCAST) &&
	    qeth_l3_get_cast_type(skb, proto) == RTN_BROADCAST)
		goto tx_drop;

	/* IPv4, and all traffic on IQD, needs the L3 header fixups */
	if (proto == htons(ETH_P_IP) || IS_IQD(card))
		rc = qeth_l3_xmit(card, skb, queue, proto);
	else
		rc = qeth_xmit(card, skb, queue, proto, qeth_l3_fill_header);

	if (!rc)
		return NETDEV_TX_OK;

tx_drop:
	QETH_TXQ_STAT_INC(queue, tx_dropped);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* ndo_set_rx_mode callback: defer the actual update (which issues
 * sleeping IPA commands) to qeth_l3_rx_mode_work().
 */
static void qeth_l3_set_rx_mode(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	schedule_work(&card->rx_mode_work);
}
/*
 * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting
 * NOARP on the netdevice is no option because it also turns off neighbor
 * solicitation. For IPv4 we install a neighbor_setup function. We don't want
 * arp resolution but we want the hard header (packet socket will work
 * e.g. tcpdump)
 */
static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
{
	/* mark the neighbour reachable without ever ARPing for it */
	n->nud_state = NUD_NOARP;
	/* dummy MAC, only there to satisfy the hard-header code */
	memcpy(n->ha, "FAKELL", 6);
	n->output = n->ops->connected_output;
	return 0;
}
/* ndo_neigh_setup callback: apply the NOARP neighbour setup to IPv4 only;
 * IPv6 keeps regular neighbour solicitation.
 */
static int
qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
{
	if (np->tbl->family == AF_INET)
		np->neigh_setup = qeth_l3_neigh_setup_noarp;
	return 0;
}
  1545. static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
  1546. struct net_device *dev,
  1547. netdev_features_t features)
  1548. {
  1549. if (vlan_get_protocol(skb) != htons(ETH_P_IP))
  1550. features &= ~NETIF_F_HW_VLAN_CTAG_TX;
  1551. return qeth_features_check(skb, dev, features);
  1552. }
  1553. static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
  1554. struct net_device *sb_dev)
  1555. {
  1556. __be16 proto = vlan_get_protocol(skb);
  1557. return qeth_iqd_select_queue(dev, skb,
  1558. qeth_l3_get_cast_type(skb, proto), sb_dev);
  1559. }
/* net_device ops for IQD devices (see qeth_l3_setup_netdev()) */
static const struct net_device_ops qeth_l3_netdev_ops = {
	.ndo_open		= qeth_open,
	.ndo_stop		= qeth_stop,
	.ndo_get_stats64	= qeth_get_stats64,
	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
	.ndo_select_queue	= qeth_l3_iqd_select_queue,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
	.ndo_eth_ioctl		= qeth_do_ioctl,
	.ndo_siocdevprivate	= qeth_l3_ndo_siocdevprivate,
	.ndo_fix_features	= qeth_fix_features,
	.ndo_set_features	= qeth_set_features,
	.ndo_tx_timeout		= qeth_tx_timeout,
};
/* net_device ops for OSD/OSX devices: adds the features check, OSA queue
 * selection and the IPv4 NOARP neighbour setup (see qeth_l3_setup_netdev())
 */
static const struct net_device_ops qeth_l3_osa_netdev_ops = {
	.ndo_open		= qeth_open,
	.ndo_stop		= qeth_stop,
	.ndo_get_stats64	= qeth_get_stats64,
	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
	.ndo_features_check	= qeth_l3_osa_features_check,
	.ndo_select_queue	= qeth_osa_select_queue,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
	.ndo_eth_ioctl		= qeth_do_ioctl,
	.ndo_siocdevprivate	= qeth_l3_ndo_siocdevprivate,
	.ndo_fix_features	= qeth_fix_features,
	.ndo_set_features	= qeth_set_features,
	.ndo_tx_timeout		= qeth_tx_timeout,
	.ndo_neigh_setup	= qeth_l3_neigh_setup,
};
/* Set up the L3 net_device for an OSA (OSD/OSX) or IQD card: netdev ops,
 * offload features, headroom for the qeth HW header, then register it.
 * Returns 0 or a negative errno.
 */
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
	struct net_device *dev = card->dev;
	unsigned int headroom;
	int rc;

	if (IS_OSD(card) || IS_OSX(card)) {
		card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;

		/*IPv6 address autoconfiguration stuff*/
		dev->dev_id = qeth_l3_get_unique_id(card, dev->dev_id);

		if (!IS_VM_NIC(card)) {
			card->dev->features |= NETIF_F_SG;
			card->dev->hw_features |= NETIF_F_TSO |
				NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
			card->dev->vlan_features |= NETIF_F_TSO |
				NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
		}

		if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
			card->dev->hw_features |= NETIF_F_IPV6_CSUM;
			card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
		}
		if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
			card->dev->hw_features |= NETIF_F_TSO6;
			card->dev->vlan_features |= NETIF_F_TSO6;
		}

		/* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
		if (card->dev->hw_features & NETIF_F_TSO6)
			headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
		else if (card->dev->hw_features & NETIF_F_TSO)
			headroom = sizeof(struct qeth_hdr_tso);
		else
			headroom = sizeof(struct qeth_hdr) + VLAN_HLEN;
	} else if (IS_IQD(card)) {
		card->dev->flags |= IFF_NOARP;
		card->dev->netdev_ops = &qeth_l3_netdev_ops;

		/* the HW header re-uses the room freed by stripping the ETH
		 * header, see qeth_l3_xmit()
		 */
		headroom = sizeof(struct qeth_hdr) - ETH_HLEN;

		rc = qeth_l3_iqd_read_initial_mac(card);
		if (rc)
			return rc;
	} else
		return -ENODEV;

	card->dev->needed_headroom = headroom;
	card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX;

	netif_keep_dst(card->dev);
	if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6))
		netif_set_tso_max_size(card->dev,
				       PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));

	netif_napi_add(card->dev, &card->napi, qeth_poll);
	return register_netdev(card->dev);
}
/* Device type carrying the L3 sysfs attribute groups; assigned to the
 * ccwgroup device in qeth_l3_probe_device() when no type is set yet.
 */
static const struct device_type qeth_l3_devtype = {
	.name = "qeth_layer3",
	.groups = qeth_l3_attr_groups,
};
  1644. static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
  1645. {
  1646. struct qeth_card *card = dev_get_drvdata(&gdev->dev);
  1647. int rc;
  1648. hash_init(card->ip_htable);
  1649. mutex_init(&card->ip_lock);
  1650. card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0,
  1651. dev_name(&gdev->dev));
  1652. if (!card->cmd_wq)
  1653. return -ENOMEM;
  1654. if (gdev->dev.type) {
  1655. rc = device_add_groups(&gdev->dev, qeth_l3_attr_groups);
  1656. if (rc) {
  1657. destroy_workqueue(card->cmd_wq);
  1658. return rc;
  1659. }
  1660. } else {
  1661. gdev->dev.type = &qeth_l3_devtype;
  1662. }
  1663. INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
  1664. return 0;
  1665. }
/* Discipline remove hook: quiesce the card, take it offline if needed, and
 * free all L3-private state.  The teardown order matters: worker threads are
 * stopped before going offline, and the netdev is unregistered before the
 * command workqueue and IP tables are destroyed.
 */
static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);

	/* Mirror of probe: the groups were only added explicitly when the
	 * device type was not ours; otherwise qeth_l3_devtype owns them.
	 */
	if (cgdev->dev.type != &qeth_l3_devtype)
		device_remove_groups(&cgdev->dev, qeth_l3_attr_groups);

	/* Forbid new recovery threads, then wait for running ones. */
	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (cgdev->state == CCWGROUP_ONLINE)
		qeth_set_offline(card, card->discipline, false);

	if (card->dev->reg_state == NETREG_REGISTERED)
		unregister_netdev(card->dev);

	destroy_workqueue(card->cmd_wq);
	qeth_l3_clear_ip_htable(card, 0);
	qeth_l3_clear_ipato_list(card);
}
/* Discipline online hook: perform L3 "softsetup" (adapter parms, IP
 * assists, routing), re-add remembered IP addresses, and either register
 * the netdev (first online) or re-attach an already-registered one.
 * @carrier_ok tells us whether to report carrier up.
 * Returns 0 on success or a negative errno; on failure the card is put
 * back into DOWN state and the IP table is cleared (addresses are kept
 * for the next recovery, per qeth_l3_clear_ip_htable(card, 1)).
 */
static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok)
{
	struct net_device *dev = card->dev;
	int rc = 0;

	/* softsetup */
	QETH_CARD_TEXT(card, 2, "softsetp");

	/* Setup failures below are logged but deliberately not fatal. */
	rc = qeth_l3_setadapter_parms(card);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
	if (!card->options.sniffer) {
		qeth_l3_start_ipassists(card);

		rc = qeth_l3_setrouting_v4(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "4err%04x", rc);
		rc = qeth_l3_setrouting_v6(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "5err%04x", rc);
	}

	card->state = CARD_STATE_SOFTSETUP;

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	qeth_l3_recover_ip(card);

	if (dev->reg_state != NETREG_REGISTERED) {
		/* First time online: create and register the netdev. */
		rc = qeth_l3_setup_netdev(card);
		if (rc)
			goto err_setup;

		if (carrier_ok)
			netif_carrier_on(dev);
	} else {
		/* Recovery path: re-attach the existing netdev under RTNL. */
		rtnl_lock();
		rc = qeth_set_real_num_tx_queues(card,
						 qeth_tx_actual_queues(card));
		if (rc) {
			rtnl_unlock();
			goto err_set_queues;
		}

		if (carrier_ok)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		netif_device_attach(dev);
		qeth_enable_hw_features(dev);

		if (netif_running(dev)) {
			local_bh_disable();
			napi_schedule(&card->napi);
			/* kick-start the NAPI softirq: */
			local_bh_enable();
		}
		rtnl_unlock();
	}
	return 0;

err_set_queues:
err_setup:
	qeth_set_allowed_threads(card, 0, 1);
	card->state = CARD_STATE_DOWN;
	qeth_l3_clear_ip_htable(card, 1);
	return rc;
}
/* Discipline offline hook: stop worker threads, flush pending rx-mode work,
 * disable sniffer tracing if it was active, and drop back to DOWN state.
 */
static void qeth_l3_set_offline(struct qeth_card *card)
{
	qeth_set_allowed_threads(card, 0, 1);
	qeth_l3_drain_rx_mode_cache(card);

	if (card->options.sniffer &&
	    (card->info.promisc_mode == SET_PROMISC_MODE_ON))
		qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);

	if (card->state == CARD_STATE_SOFTSETUP) {
		card->state = CARD_STATE_DOWN;
		/* Keep the addresses for re-add on the next online. */
		qeth_l3_clear_ip_htable(card, 1);
	}
}
/* Returns zero if the command is successfully "consumed" */
static int qeth_l3_control_event(struct qeth_card *card,
				 struct qeth_ipa_cmd *cmd)
{
	/* L3 consumes no control events; hand everything back to the core. */
	return 1;
}
/* Layer-3 discipline callbacks, hooked up by the qeth core. */
const struct qeth_discipline qeth_l3_discipline = {
	.setup = qeth_l3_probe_device,
	.remove = qeth_l3_remove_device,
	.set_online = qeth_l3_set_online,
	.set_offline = qeth_l3_set_offline,
	.control_event_handler = qeth_l3_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l3_discipline);
  1764. static int qeth_l3_handle_ip_event(struct qeth_card *card,
  1765. struct qeth_ipaddr *addr,
  1766. unsigned long event)
  1767. {
  1768. switch (event) {
  1769. case NETDEV_UP:
  1770. qeth_l3_modify_ip(card, addr, true);
  1771. return NOTIFY_OK;
  1772. case NETDEV_DOWN:
  1773. qeth_l3_modify_ip(card, addr, false);
  1774. return NOTIFY_OK;
  1775. default:
  1776. return NOTIFY_DONE;
  1777. }
  1778. }
/* Deferred IPv6 address add/delete request, queued on the card's cmd_wq by
 * qeth_l3_ip6_event() and freed by the worker that consumes it.
 */
struct qeth_l3_ip_event_work {
	struct work_struct work;
	struct qeth_card *card;
	struct qeth_ipaddr addr;
};

/* Map an embedded work_struct back to its containing event. */
#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work)
  1785. static void qeth_l3_add_ip_worker(struct work_struct *work)
  1786. {
  1787. struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
  1788. qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true);
  1789. kfree(work);
  1790. }
  1791. static void qeth_l3_delete_ip_worker(struct work_struct *work)
  1792. {
  1793. struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
  1794. qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false);
  1795. kfree(work);
  1796. }
  1797. static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
  1798. {
  1799. if (is_vlan_dev(dev))
  1800. dev = vlan_dev_real_dev(dev);
  1801. if (dev->netdev_ops == &qeth_l3_osa_netdev_ops ||
  1802. dev->netdev_ops == &qeth_l3_netdev_ops)
  1803. return (struct qeth_card *) dev->ml_priv;
  1804. return NULL;
  1805. }
  1806. static int qeth_l3_ip_event(struct notifier_block *this,
  1807. unsigned long event, void *ptr)
  1808. {
  1809. struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
  1810. struct net_device *dev = ifa->ifa_dev->dev;
  1811. struct qeth_ipaddr addr;
  1812. struct qeth_card *card;
  1813. card = qeth_l3_get_card_from_dev(dev);
  1814. if (!card)
  1815. return NOTIFY_DONE;
  1816. QETH_CARD_TEXT(card, 3, "ipevent");
  1817. qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
  1818. addr.u.a4.addr = ifa->ifa_address;
  1819. addr.u.a4.mask = ifa->ifa_mask;
  1820. return qeth_l3_handle_ip_event(card, &addr, event);
  1821. }
  1822. static struct notifier_block qeth_l3_ip_notifier = {
  1823. qeth_l3_ip_event,
  1824. NULL,
  1825. };
  1826. static int qeth_l3_ip6_event(struct notifier_block *this,
  1827. unsigned long event, void *ptr)
  1828. {
  1829. struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
  1830. struct net_device *dev = ifa->idev->dev;
  1831. struct qeth_l3_ip_event_work *ip_work;
  1832. struct qeth_card *card;
  1833. if (event != NETDEV_UP && event != NETDEV_DOWN)
  1834. return NOTIFY_DONE;
  1835. card = qeth_l3_get_card_from_dev(dev);
  1836. if (!card)
  1837. return NOTIFY_DONE;
  1838. QETH_CARD_TEXT(card, 3, "ip6event");
  1839. if (!qeth_is_supported(card, IPA_IPV6))
  1840. return NOTIFY_DONE;
  1841. ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC);
  1842. if (!ip_work)
  1843. return NOTIFY_DONE;
  1844. if (event == NETDEV_UP)
  1845. INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker);
  1846. else
  1847. INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker);
  1848. ip_work->card = card;
  1849. qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL,
  1850. QETH_PROT_IPV6);
  1851. ip_work->addr.u.a6.addr = ifa->addr;
  1852. ip_work->addr.u.a6.pfxlen = ifa->prefix_len;
  1853. queue_work(card->cmd_wq, &ip_work->work);
  1854. return NOTIFY_OK;
  1855. }
  1856. static struct notifier_block qeth_l3_ip6_notifier = {
  1857. qeth_l3_ip6_event,
  1858. NULL,
  1859. };
  1860. static int qeth_l3_register_notifiers(void)
  1861. {
  1862. int rc;
  1863. QETH_DBF_TEXT(SETUP, 5, "regnotif");
  1864. rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
  1865. if (rc)
  1866. return rc;
  1867. rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
  1868. if (rc) {
  1869. unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
  1870. return rc;
  1871. }
  1872. return 0;
  1873. }
/* Unregister both address notifiers; a failure here indicates a driver
 * bookkeeping bug, hence the WARN_ON.
 */
static void qeth_l3_unregister_notifiers(void)
{
	QETH_DBF_TEXT(SETUP, 5, "unregnot");
	WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
	WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
}
/* Module init: the only setup needed here is the address notifiers; the
 * discipline itself is picked up by the qeth core via qeth_l3_discipline.
 */
static int __init qeth_l3_init(void)
{
	pr_info("register layer 3 discipline\n");
	return qeth_l3_register_notifiers();
}
/* Module exit: mirror of qeth_l3_init(). */
static void __exit qeth_l3_exit(void)
{
	qeth_l3_unregister_notifiers();
	pr_info("unregister layer 3 discipline\n");
}
/* Module entry points and metadata. */
module_init(qeth_l3_init);
module_exit(qeth_l3_exit);
MODULE_AUTHOR("Frank Blaschka <[email protected]>");
MODULE_DESCRIPTION("qeth layer 3 discipline");
MODULE_LICENSE("GPL");