zfcp_fc.c

// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Fibre Channel related functions for the zfcp device driver.
 *
 * Copyright IBM Corp. 2008, 2017
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/bsg-lib.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"

struct kmem_cache *zfcp_fc_req_cache;

static u32 zfcp_fc_rscn_range_mask[] = {
        [ELS_ADDR_FMT_PORT] = 0xFFFFFF,
        [ELS_ADDR_FMT_AREA] = 0xFFFF00,
        [ELS_ADDR_FMT_DOM]  = 0xFF0000,
        [ELS_ADDR_FMT_FAB]  = 0x000000,
};

static bool no_auto_port_rescan;
module_param(no_auto_port_rescan, bool, 0600);
MODULE_PARM_DESC(no_auto_port_rescan,
                 "no automatic port_rescan (default off)");

static unsigned int port_scan_backoff = 500;
module_param(port_scan_backoff, uint, 0600);
MODULE_PARM_DESC(port_scan_backoff,
                 "upper limit of port scan random backoff in msecs (default 500)");

static unsigned int port_scan_ratelimit = 60000;
module_param(port_scan_ratelimit, uint, 0600);
MODULE_PARM_DESC(port_scan_ratelimit,
                 "minimum interval between port scans in msecs (default 60000)");

unsigned int zfcp_fc_port_scan_backoff(void)
{
        if (!port_scan_backoff)
                return 0;
        return prandom_u32_max(port_scan_backoff);
}

static void zfcp_fc_port_scan_time(struct zfcp_adapter *adapter)
{
        unsigned long interval = msecs_to_jiffies(port_scan_ratelimit);
        unsigned long backoff = msecs_to_jiffies(zfcp_fc_port_scan_backoff());

        adapter->next_port_scan = jiffies + interval + backoff;
}

static void zfcp_fc_port_scan(struct zfcp_adapter *adapter)
{
        unsigned long now = jiffies;
        unsigned long next = adapter->next_port_scan;
        unsigned long delay = 0, max;

        /* delay only needed within waiting period */
        if (time_before(now, next)) {
                delay = next - now;
                /* paranoia: never ever delay scans longer than specified */
                max = msecs_to_jiffies(port_scan_ratelimit + port_scan_backoff);
                delay = min(delay, max);
        }

        queue_delayed_work(adapter->work_queue, &adapter->scan_work, delay);
}

void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
{
        if (no_auto_port_rescan)
                return;

        zfcp_fc_port_scan(adapter);
}

void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
{
        if (!no_auto_port_rescan)
                return;

        zfcp_fc_port_scan(adapter);
}

/**
 * zfcp_fc_post_event - post event to userspace via fc_transport
 * @work: work struct with enqueued events
 */
void zfcp_fc_post_event(struct work_struct *work)
{
        struct zfcp_fc_event *event = NULL, *tmp = NULL;
        LIST_HEAD(tmp_lh);
        struct zfcp_fc_events *events = container_of(work,
                                        struct zfcp_fc_events, work);
        struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
                                        events);

        spin_lock_bh(&events->list_lock);
        list_splice_init(&events->list, &tmp_lh);
        spin_unlock_bh(&events->list_lock);

        list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
                fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
                                   event->code, event->data);
                list_del(&event->list);
                kfree(event);
        }
}

/**
 * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
 * @adapter: The adapter where to enqueue the event
 * @event_code: The event code (as defined in fc_host_event_code in
 *              scsi_transport_fc.h)
 * @event_data: The event data (e.g. n_port page in case of els)
 */
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
                           enum fc_host_event_code event_code, u32 event_data)
{
        struct zfcp_fc_event *event;

        event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
        if (!event)
                return;

        event->code = event_code;
        event->data = event_data;

        spin_lock(&adapter->events.list_lock);
        list_add_tail(&event->list, &adapter->events.list);
        spin_unlock(&adapter->events.list_lock);

        queue_work(adapter->work_queue, &adapter->events.work);
}
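/*
 * zfcp_fc_wka_port_get() - open the well-known-address (WKA) port if
 * necessary and take a reference on it. The caller blocks until the port
 * is either online (success) or back offline (failure). Returns 0 on
 * success, -ERESTARTSYS if interrupted while acquiring the mutex, and
 * -EIO otherwise.
 */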
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
        int ret = -EIO;

        if (mutex_lock_interruptible(&wka_port->mutex))
                return -ERESTARTSYS;

        if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
            wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
                wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
                if (zfcp_fsf_open_wka_port(wka_port)) {
                        /* could not even send request, nothing to wait for */
                        wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
                        goto out;
                }
        }

        wait_event(wka_port->opened,
                   wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
                   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);

        if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
                atomic_inc(&wka_port->refcount);
                ret = 0;
                goto out;
        }
out:
        mutex_unlock(&wka_port->mutex);
        return ret;
}
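/*
 * zfcp_fc_wka_port_offline() - delayed work that closes the WKA port again
 * once its reference count has dropped to zero and it is still online.
 */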
static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
        struct delayed_work *dw = to_delayed_work(work);
        struct zfcp_fc_wka_port *wka_port =
                        container_of(dw, struct zfcp_fc_wka_port, work);

        mutex_lock(&wka_port->mutex);
        if ((atomic_read(&wka_port->refcount) != 0) ||
            (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
                goto out;

        wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
        if (zfcp_fsf_close_wka_port(wka_port)) {
                /* could not even send request, nothing to wait for */
                wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
                goto out;
        }
        wait_event(wka_port->closed,
                   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
out:
        mutex_unlock(&wka_port->mutex);
}
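/*
 * zfcp_fc_wka_port_put() - drop a WKA port reference; when the last
 * reference goes away, schedule zfcp_fc_wka_port_offline() with a small
 * delay so that back-to-back users can reuse the still-open port.
 */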
static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
{
        if (atomic_dec_return(&wka_port->refcount) != 0)
                return;
        /* wait 10 milliseconds, other reqs might pop in */
        queue_delayed_work(wka_port->adapter->work_queue, &wka_port->work,
                           msecs_to_jiffies(10));
}

static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
                                  struct zfcp_adapter *adapter)
{
        init_waitqueue_head(&wka_port->opened);
        init_waitqueue_head(&wka_port->closed);

        wka_port->adapter = adapter;
        wka_port->d_id = d_id;

        wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
        atomic_set(&wka_port->refcount, 0);
        mutex_init(&wka_port->mutex);
        INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}

static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
{
        cancel_delayed_work_sync(&wka->work);
        mutex_lock(&wka->mutex);
        wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
        mutex_unlock(&wka->mutex);
}

void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
{
        if (!gs)
                return;
        zfcp_fc_wka_port_force_offline(&gs->ms);
        zfcp_fc_wka_port_force_offline(&gs->ts);
        zfcp_fc_wka_port_force_offline(&gs->ds);
        zfcp_fc_wka_port_force_offline(&gs->as);
}
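/*
 * _zfcp_fc_incoming_rscn() - for one RSCN page, test the link of every
 * known remote port whose d_id falls into the affected address range
 * (port, area, domain, or fabric, depending on the page's address format).
 */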
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
                                   struct fc_els_rscn_page *page)
{
        unsigned long flags;
        struct zfcp_adapter *adapter = fsf_req->adapter;
        struct zfcp_port *port;

        read_lock_irqsave(&adapter->port_list_lock, flags);
        list_for_each_entry(port, &adapter->port_list, list) {
                if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
                        zfcp_fc_test_link(port);
        }
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
        struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
        struct zfcp_adapter *adapter = fsf_req->adapter;
        struct fc_els_rscn *head;
        struct fc_els_rscn_page *page;
        u16 i;
        u16 no_entries;
        unsigned int afmt;

        head = (struct fc_els_rscn *) status_buffer->payload.data;
        page = (struct fc_els_rscn_page *) head;

        /* see FC-FS */
        no_entries = be16_to_cpu(head->rscn_plen) /
                sizeof(struct fc_els_rscn_page);

        if (no_entries > 1) {
                /* handle failed ports */
                unsigned long flags;
                struct zfcp_port *port;

                read_lock_irqsave(&adapter->port_list_lock, flags);
                list_for_each_entry(port, &adapter->port_list, list) {
                        if (port->d_id)
                                continue;
                        zfcp_erp_port_reopen(port,
                                             ZFCP_STATUS_COMMON_ERP_FAILED,
                                             "fcrscn1");
                }
                read_unlock_irqrestore(&adapter->port_list_lock, flags);
        }

        for (i = 1; i < no_entries; i++) {
                /* skip head and start with 1st element */
                page++;
                afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
                _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
                                       page);
                zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
                                      *(u32 *)page);
        }
        zfcp_fc_conditional_port_scan(fsf_req->adapter);
}

static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
        unsigned long flags;
        struct zfcp_adapter *adapter = req->adapter;
        struct zfcp_port *port;

        read_lock_irqsave(&adapter->port_list_lock, flags);
        list_for_each_entry(port, &adapter->port_list, list)
                if (port->wwpn == wwpn) {
                        zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
                        break;
                }
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
        struct fsf_status_read_buffer *status_buffer;
        struct fc_els_flogi *plogi;

        status_buffer = (struct fsf_status_read_buffer *) req->data;
        plogi = (struct fc_els_flogi *) status_buffer->payload.data;

        zfcp_fc_incoming_wwpn(req, be64_to_cpu(plogi->fl_wwpn));
}

static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
        struct fsf_status_read_buffer *status_buffer =
                (struct fsf_status_read_buffer *)req->data;
        struct fc_els_logo *logo =
                (struct fc_els_logo *) status_buffer->payload.data;

        zfcp_fc_incoming_wwpn(req, be64_to_cpu(logo->fl_n_port_wwn));
}

/**
 * zfcp_fc_incoming_els - handle incoming ELS
 * @fsf_req: request which contains incoming ELS
 */
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
        struct fsf_status_read_buffer *status_buffer =
                (struct fsf_status_read_buffer *) fsf_req->data;
        unsigned int els_type = status_buffer->payload.data[0];

        zfcp_dbf_san_in_els("fciels1", fsf_req);
        if (els_type == ELS_PLOGI)
                zfcp_fc_incoming_plogi(fsf_req);
        else if (els_type == ELS_LOGO)
                zfcp_fc_incoming_logo(fsf_req);
        else if (els_type == ELS_RSCN)
                zfcp_fc_incoming_rscn(fsf_req);
}
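/*
 * zfcp_fc_ns_gid_pn_eval() - on a successful GID_PN accept response, copy
 * the returned N_Port ID into the port's d_id.
 */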
static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
        struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
        struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;

        if (ct_els->status)
                return;
        if (gid_pn_rsp->ct_hdr.ct_cmd != cpu_to_be16(FC_FS_ACC))
                return;

        /* looks like a valid d_id */
        ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}

static void zfcp_fc_complete(void *data)
{
        complete(data);
}

static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
{
        ct_hdr->ct_rev = FC_CT_REV;
        ct_hdr->ct_fs_type = FC_FST_DIR;
        ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
        ct_hdr->ct_cmd = cpu_to_be16(cmd);
        ct_hdr->ct_mr_size = cpu_to_be16(mr_size / 4);
}
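/*
 * zfcp_fc_ns_gid_pn_request() - build a GID_PN request for the port's WWPN,
 * send it to the directory/name server WKA port, and wait for completion
 * before evaluating the response.
 */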
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
                                     struct zfcp_fc_req *fc_req)
{
        struct zfcp_adapter *adapter = port->adapter;
        DECLARE_COMPLETION_ONSTACK(completion);
        struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
        struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
        int ret;

        /* setup parameters for send generic command */
        fc_req->ct_els.port = port;
        fc_req->ct_els.handler = zfcp_fc_complete;
        fc_req->ct_els.handler_data = &completion;
        fc_req->ct_els.req = &fc_req->sg_req;
        fc_req->ct_els.resp = &fc_req->sg_rsp;
        sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
        sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));

        zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
                           FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
        gid_pn_req->gid_pn.fn_wwpn = cpu_to_be64(port->wwpn);

        ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
                               adapter->pool.gid_pn_req,
                               ZFCP_FC_CTELS_TMO);
        if (!ret) {
                wait_for_completion(&completion);
                zfcp_fc_ns_gid_pn_eval(fc_req);
        }
        return ret;
}
/**
 * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
 * @port: port where GID_PN request is needed
 *
 * Return: -ENOMEM if no request buffer could be allocated, otherwise the
 *         result of opening the directory service port and sending the
 *         GID_PN request (0 on success).
 */
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
        int ret;
        struct zfcp_fc_req *fc_req;
        struct zfcp_adapter *adapter = port->adapter;

        fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
        if (!fc_req)
                return -ENOMEM;

        memset(fc_req, 0, sizeof(*fc_req));

        ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
        if (ret)
                goto out;

        ret = zfcp_fc_ns_gid_pn_request(port, fc_req);

        zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
        mempool_free(fc_req, adapter->pool.gid_pn);
        return ret;
}

void zfcp_fc_port_did_lookup(struct work_struct *work)
{
        int ret;
        struct zfcp_port *port = container_of(work, struct zfcp_port,
                                              gid_pn_work);

        set_worker_desc("zgidpn%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
        ret = zfcp_fc_ns_gid_pn(port);
        if (ret) {
                /* could not issue gid_pn for some reason */
                zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
                goto out;
        }

        if (!port->d_id) {
                zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
                goto out;
        }

        zfcp_erp_port_reopen(port, 0, "fcgpn_3");
out:
        put_device(&port->dev);
}

/**
 * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
 * @port: The zfcp_port to lookup the d_id for.
 */
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
        get_device(&port->dev);
        if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
                put_device(&port->dev);
}
/**
 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
 * @port: zfcp_port structure
 * @plogi: plogi payload
 *
 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
 */
void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
{
        if (be64_to_cpu(plogi->fl_wwpn) != port->wwpn) {
                port->d_id = 0;
                dev_warn(&port->adapter->ccw_device->dev,
                         "A port opened with WWPN 0x%016Lx returned data that "
                         "identifies it as WWPN 0x%016Lx\n",
                         (unsigned long long) port->wwpn,
                         (unsigned long long) be64_to_cpu(plogi->fl_wwpn));
                return;
        }

        port->wwnn = be64_to_cpu(plogi->fl_wwnn);
        port->maxframe_size = be16_to_cpu(plogi->fl_csp.sp_bb_data);

        if (plogi->fl_cssp[0].cp_class & cpu_to_be16(FC_CPC_VALID))
                port->supported_classes |= FC_COS_CLASS1;
        if (plogi->fl_cssp[1].cp_class & cpu_to_be16(FC_CPC_VALID))
                port->supported_classes |= FC_COS_CLASS2;
        if (plogi->fl_cssp[2].cp_class & cpu_to_be16(FC_CPC_VALID))
                port->supported_classes |= FC_COS_CLASS3;
        if (plogi->fl_cssp[3].cp_class & cpu_to_be16(FC_CPC_VALID))
                port->supported_classes |= FC_COS_CLASS4;
}
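/*
 * zfcp_fc_adisc_handler() - completion handler for an ADISC sent by
 * zfcp_fc_adisc(). On failure, or if the responding port does not match
 * the expected WWPN, trigger port recovery; on success restore port->d_id
 * from the response. Always clears ZFCP_STATUS_PORT_LINK_TEST and drops
 * the port reference taken in zfcp_fc_test_link().
 */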
static void zfcp_fc_adisc_handler(void *data)
{
        struct zfcp_fc_req *fc_req = data;
        struct zfcp_port *port = fc_req->ct_els.port;
        struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;

        if (fc_req->ct_els.status) {
                /* request rejected or timed out */
                zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
                                            "fcadh_1");
                goto out;
        }

        if (!port->wwnn)
                port->wwnn = be64_to_cpu(adisc_resp->adisc_wwnn);

        if ((port->wwpn != be64_to_cpu(adisc_resp->adisc_wwpn)) ||
            !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
                zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
                                     "fcadh_2");
                goto out;
        }

        /* re-init to undo drop from zfcp_fc_adisc() */
        port->d_id = ntoh24(adisc_resp->adisc_port_id);
        /* port is still good, nothing to do */
out:
        atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
        put_device(&port->dev);
        kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
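/*
 * zfcp_fc_adisc() - build and send an ADISC ELS to the remote port as a
 * lightweight link test; completion is handled asynchronously by
 * zfcp_fc_adisc_handler().
 */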
static int zfcp_fc_adisc(struct zfcp_port *port)
{
        struct zfcp_fc_req *fc_req;
        struct zfcp_adapter *adapter = port->adapter;
        struct Scsi_Host *shost = adapter->scsi_host;
        u32 d_id;
        int ret;

        fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
        if (!fc_req)
                return -ENOMEM;

        fc_req->ct_els.port = port;
        fc_req->ct_els.req = &fc_req->sg_req;
        fc_req->ct_els.resp = &fc_req->sg_rsp;
        sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
                    sizeof(struct fc_els_adisc));
        sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
                    sizeof(struct fc_els_adisc));

        fc_req->ct_els.handler = zfcp_fc_adisc_handler;
        fc_req->ct_els.handler_data = fc_req;

        /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
           without FC-AL-2 capability, so we don't set it */
        fc_req->u.adisc.req.adisc_wwpn = cpu_to_be64(fc_host_port_name(shost));
        fc_req->u.adisc.req.adisc_wwnn = cpu_to_be64(fc_host_node_name(shost));
        fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
        hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));

        d_id = port->d_id; /* remember as destination for send els below */
        /*
         * Force fresh GID_PN lookup on next port recovery.
         * Must happen after request setup and before sending request,
         * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
         */
        port->d_id = 0;

        ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
                                ZFCP_FC_CTELS_TMO);
        if (ret)
                kmem_cache_free(zfcp_fc_req_cache, fc_req);
        return ret;
}

void zfcp_fc_link_test_work(struct work_struct *work)
{
        struct zfcp_port *port =
                container_of(work, struct zfcp_port, test_link_work);
        int retval;

        set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */

        /* only issue one test command at one time per port */
        if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
                goto out;

        atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);

        retval = zfcp_fc_adisc(port);
        if (retval == 0)
                return;

        /* send of ADISC was not possible */
        atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
        zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
out:
        put_device(&port->dev);
}

/**
 * zfcp_fc_test_link - lightweight link test procedure
 * @port: port to be tested
 *
 * Test status of a link to a remote port using the ELS command ADISC.
 * If there is a problem with the remote port, error recovery steps
 * will be triggered.
 */
void zfcp_fc_test_link(struct zfcp_port *port)
{
        get_device(&port->dev);
        if (!queue_work(port->adapter->work_queue, &port->test_link_work))
                put_device(&port->dev);
}
/**
 * zfcp_fc_sg_free_table - free memory used by scatterlists
 * @sg: pointer to scatterlist
 * @count: number of scatterlist entries to be freed
 *
 * The scatterlist entries are expected to always reference pages.
 */
static void zfcp_fc_sg_free_table(struct scatterlist *sg, int count)
{
        int i;

        for (i = 0; i < count; i++, sg = sg_next(sg))
                if (sg)
                        free_page((unsigned long) sg_virt(sg));
                else
                        break;
}
/**
 * zfcp_fc_sg_setup_table - init scatterlist and allocate, assign buffers
 * @sg: pointer to struct scatterlist
 * @count: number of scatterlists which should be assigned with buffers
 *         of size page
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
static int zfcp_fc_sg_setup_table(struct scatterlist *sg, int count)
{
        void *addr;
        int i;

        sg_init_table(sg, count);
        for (i = 0; i < count; i++, sg = sg_next(sg)) {
                addr = (void *) get_zeroed_page(GFP_KERNEL);
                if (!addr) {
                        zfcp_fc_sg_free_table(sg, i);
                        return -ENOMEM;
                }
                sg_set_buf(sg, addr, PAGE_SIZE);
        }
        return 0;
}

static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
{
        struct zfcp_fc_req *fc_req;

        fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
        if (!fc_req)
                return NULL;

        if (zfcp_fc_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
                kmem_cache_free(zfcp_fc_req_cache, fc_req);
                return NULL;
        }

        sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
                    sizeof(struct zfcp_fc_gpn_ft_req));

        return fc_req;
}
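/*
 * zfcp_fc_send_gpn_ft() - send a GPN_FT (get port names by FC-4 type)
 * request for FCP to the name server and wait for the response to arrive
 * in the previously allocated scatterlist buffers.
 */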
static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
                               struct zfcp_adapter *adapter, int max_bytes)
{
        struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
        struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
        DECLARE_COMPLETION_ONSTACK(completion);
        int ret;

        zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
        req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;

        ct_els->handler = zfcp_fc_complete;
        ct_els->handler_data = &completion;
        ct_els->req = &fc_req->sg_req;
        ct_els->resp = &fc_req->sg_rsp;

        ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
                               ZFCP_FC_CTELS_TMO);
        if (!ret)
                wait_for_completion(&completion);
        return ret;
}

static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
{
        if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
                return;

        atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);

        if ((port->supported_classes != 0) ||
            !list_empty(&port->unit_list))
                return;

        list_move_tail(&port->list, lh);
}
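/*
 * zfcp_fc_eval_gpn_ft() - walk the GPN_FT accept payload, enqueue and open
 * any newly discovered remote ports, then move ports that were not
 * confirmed by the scan (and have no units) onto a removal list and
 * unregister them. Returns -EAGAIN for a temporary name server reject,
 * -EIO or -E2BIG on other errors, 0 otherwise.
 */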
static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
                               struct zfcp_adapter *adapter, int max_entries)
{
        struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
        struct scatterlist *sg = &fc_req->sg_rsp;
        struct fc_ct_hdr *hdr = sg_virt(sg);
        struct fc_gpn_ft_resp *acc = sg_virt(sg);
        struct zfcp_port *port, *tmp;
        unsigned long flags;
        LIST_HEAD(remove_lh);
        u32 d_id;
        int ret = 0, x, last = 0;

        if (ct_els->status)
                return -EIO;

        if (hdr->ct_cmd != cpu_to_be16(FC_FS_ACC)) {
                if (hdr->ct_reason == FC_FS_RJT_UNABL)
                        return -EAGAIN; /* might be a temporary condition */
                return -EIO;
        }

        if (hdr->ct_mr_size) {
                dev_warn(&adapter->ccw_device->dev,
                         "The name server reported %d words residual data\n",
                         hdr->ct_mr_size);
                return -E2BIG;
        }

        /* first entry is the header */
        for (x = 1; x < max_entries && !last; x++) {
                if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
                        acc++;
                else
                        acc = sg_virt(++sg);

                last = acc->fp_flags & FC_NS_FID_LAST;
                d_id = ntoh24(acc->fp_fid);

                /* don't attach ports with a well known address */
                if (d_id >= FC_FID_WELL_KNOWN_BASE)
                        continue;
                /* skip the adapter's port and known remote ports */
                if (be64_to_cpu(acc->fp_wwpn) ==
                    fc_host_port_name(adapter->scsi_host))
                        continue;

                port = zfcp_port_enqueue(adapter, be64_to_cpu(acc->fp_wwpn),
                                         ZFCP_STATUS_COMMON_NOESC, d_id);
                if (!IS_ERR(port))
                        zfcp_erp_port_reopen(port, 0, "fcegpf1");
                else if (PTR_ERR(port) != -EEXIST)
                        ret = PTR_ERR(port);
        }

        zfcp_erp_wait(adapter);
        write_lock_irqsave(&adapter->port_list_lock, flags);
        list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
                zfcp_fc_validate_port(port, &remove_lh);
        write_unlock_irqrestore(&adapter->port_list_lock, flags);

        list_for_each_entry_safe(port, tmp, &remove_lh, list) {
                zfcp_erp_port_shutdown(port, 0, "fcegpf2");
                device_unregister(&port->dev);
        }

        return ret;
}

/**
 * zfcp_fc_scan_ports - scan remote ports and attach new ports
 * @work: reference to scheduled work
 */
void zfcp_fc_scan_ports(struct work_struct *work)
{
        struct delayed_work *dw = to_delayed_work(work);
        struct zfcp_adapter *adapter = container_of(dw, struct zfcp_adapter,
                                                    scan_work);
        int ret, i;
        struct zfcp_fc_req *fc_req;
        int chain, max_entries, buf_num, max_bytes;

        zfcp_fc_port_scan_time(adapter);

        chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
        buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
        max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
        max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;

        if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
            fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
                return;

        if (zfcp_fc_wka_port_get(&adapter->gs->ds))
                return;

        fc_req = zfcp_fc_alloc_sg_env(buf_num);
        if (!fc_req)
                goto out;

        for (i = 0; i < 3; i++) {
                ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
                if (!ret) {
                        ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
                        if (ret == -EAGAIN)
                                ssleep(1);
                        else
                                break;
                }
        }
        zfcp_fc_sg_free_table(&fc_req->sg_rsp, buf_num);
        kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
        zfcp_fc_wka_port_put(&adapter->gs->ds);
}
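/*
 * zfcp_fc_gspn() - query the name server for the adapter's own symbolic
 * port name (GSPN_ID). In NPIV mode, if the returned name does not yet
 * carry a "DEVNO:" tag, compose a Linux-specific symbolic name from it;
 * otherwise take over the name as reported.
 */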
static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
                        struct zfcp_fc_req *fc_req)
{
        DECLARE_COMPLETION_ONSTACK(completion);
        char devno[] = "DEVNO:";
        struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
        struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
        struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
        int ret;

        zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
                           FC_SYMBOLIC_NAME_SIZE);
        hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));

        sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
        sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));

        ct_els->handler = zfcp_fc_complete;
        ct_els->handler_data = &completion;
        ct_els->req = &fc_req->sg_req;
        ct_els->resp = &fc_req->sg_rsp;

        ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
                               ZFCP_FC_CTELS_TMO);
        if (ret)
                return ret;

        wait_for_completion(&completion);
        if (ct_els->status)
                return ct_els->status;

        if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
            !(strstr(gspn_rsp->gspn.fp_name, devno)))
                snprintf(fc_host_symbolic_name(adapter->scsi_host),
                         FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
                         gspn_rsp->gspn.fp_name, devno,
                         dev_name(&adapter->ccw_device->dev),
                         init_utsname()->nodename);
        else
                strscpy(fc_host_symbolic_name(adapter->scsi_host),
                        gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);

        return 0;
}
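/*
 * zfcp_fc_rspn() - register the (possibly just updated) symbolic port name
 * with the fabric name server via an RSPN_ID request.
 */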
static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
                         struct zfcp_fc_req *fc_req)
{
        DECLARE_COMPLETION_ONSTACK(completion);
        struct Scsi_Host *shost = adapter->scsi_host;
        struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
        struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
        struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
        int ret, len;

        zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
                           FC_SYMBOLIC_NAME_SIZE);
        hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
        len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
                      FC_SYMBOLIC_NAME_SIZE);
        rspn_req->rspn.fr_name_len = len;

        sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
        sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));

        ct_els->handler = zfcp_fc_complete;
        ct_els->handler_data = &completion;
        ct_els->req = &fc_req->sg_req;
        ct_els->resp = &fc_req->sg_rsp;

        ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
                               ZFCP_FC_CTELS_TMO);
        if (!ret)
                wait_for_completion(&completion);
}

/**
 * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
 * @work: ns_up_work of the adapter where to update the symbolic port name
 *
 * Retrieve the current symbolic port name that may have been set by
 * the hardware using the GSPN request and update the fc_host
 * symbolic_name sysfs attribute. When running in NPIV mode (and hence
 * the port name is unique for this system), update the symbolic port
 * name to add Linux specific information and update the FC nameserver
 * using the RSPN request.
 */
void zfcp_fc_sym_name_update(struct work_struct *work)
{
        struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
                                                    ns_up_work);
        int ret;
        struct zfcp_fc_req *fc_req;

        if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
            fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
                return;

        fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
        if (!fc_req)
                return;

        ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
        if (ret)
                goto out_free;

        ret = zfcp_fc_gspn(adapter, fc_req);
        if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
                goto out_ds_put;

        memset(fc_req, 0, sizeof(*fc_req));
        zfcp_fc_rspn(adapter, fc_req);

out_ds_put:
        zfcp_fc_wka_port_put(&adapter->gs->ds);
out_free:
        kmem_cache_free(zfcp_fc_req_cache, fc_req);
}

static void zfcp_fc_ct_els_job_handler(void *data)
{
        struct bsg_job *job = data;
        struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
        struct fc_bsg_reply *jr = job->reply;

        jr->reply_payload_rcv_len = job->reply_payload.payload_len;
        jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
        jr->result = zfcp_ct_els->status ? -EIO : 0;
        bsg_job_done(job, jr->result, jr->reply_payload_rcv_len);
}
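/*
 * zfcp_fc_job_wka_port() - map the GS type from a BSG CT request's preamble
 * to the corresponding well-known-address port (alias, management, time, or
 * directory service) of the adapter, or NULL if unsupported.
 */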
static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct bsg_job *job)
{
        u32 preamble_word1;
        u8 gs_type;
        struct zfcp_adapter *adapter;
        struct fc_bsg_request *bsg_request = job->request;
        struct fc_rport *rport = fc_bsg_to_rport(job);
        struct Scsi_Host *shost;

        preamble_word1 = bsg_request->rqst_data.r_ct.preamble_word1;
        gs_type = (preamble_word1 & 0xff000000) >> 24;

        shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
        adapter = (struct zfcp_adapter *) shost->hostdata[0];

        switch (gs_type) {
        case FC_FST_ALIAS:
                return &adapter->gs->as;
        case FC_FST_MGMT:
                return &adapter->gs->ms;
        case FC_FST_TIME:
                return &adapter->gs->ts;
                break;
        case FC_FST_DIR:
                return &adapter->gs->ds;
                break;
        default:
                return NULL;
        }
}

static void zfcp_fc_ct_job_handler(void *data)
{
        struct bsg_job *job = data;
        struct zfcp_fc_wka_port *wka_port;

        wka_port = zfcp_fc_job_wka_port(job);
        zfcp_fc_wka_port_put(wka_port);

        zfcp_fc_ct_els_job_handler(data);
}

static int zfcp_fc_exec_els_job(struct bsg_job *job,
                                struct zfcp_adapter *adapter)
{
        struct zfcp_fsf_ct_els *els = job->dd_data;
        struct fc_rport *rport = fc_bsg_to_rport(job);
        struct fc_bsg_request *bsg_request = job->request;
        struct zfcp_port *port;
        u32 d_id;

        if (rport) {
                port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
                if (!port)
                        return -EINVAL;

                d_id = port->d_id;
                put_device(&port->dev);
        } else
                d_id = ntoh24(bsg_request->rqst_data.h_els.port_id);

        els->handler = zfcp_fc_ct_els_job_handler;
        return zfcp_fsf_send_els(adapter, d_id, els, job->timeout / HZ);
}

static int zfcp_fc_exec_ct_job(struct bsg_job *job,
                               struct zfcp_adapter *adapter)
{
        int ret;
        struct zfcp_fsf_ct_els *ct = job->dd_data;
        struct zfcp_fc_wka_port *wka_port;

        wka_port = zfcp_fc_job_wka_port(job);
        if (!wka_port)
                return -EINVAL;

        ret = zfcp_fc_wka_port_get(wka_port);
        if (ret)
                return ret;

        ct->handler = zfcp_fc_ct_job_handler;
        ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->timeout / HZ);
        if (ret)
                zfcp_fc_wka_port_put(wka_port);

        return ret;
}
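/**
 * zfcp_fc_exec_bsg_job - handle a CT or ELS request passed in via FC BSG
 * @job: the bsg_job describing the request and its payload scatterlists
 *
 * Dispatches the job to the ELS or CT path depending on the message code.
 * Returns -EINVAL if the adapter is not open or the message code is
 * unsupported, otherwise the result of submitting the request.
 */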
int zfcp_fc_exec_bsg_job(struct bsg_job *job)
{
        struct Scsi_Host *shost;
        struct zfcp_adapter *adapter;
        struct zfcp_fsf_ct_els *ct_els = job->dd_data;
        struct fc_bsg_request *bsg_request = job->request;
        struct fc_rport *rport = fc_bsg_to_rport(job);

        shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
        adapter = (struct zfcp_adapter *)shost->hostdata[0];

        if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
                return -EINVAL;

        ct_els->req = job->request_payload.sg_list;
        ct_els->resp = job->reply_payload.sg_list;
        ct_els->handler_data = job;

        switch (bsg_request->msgcode) {
        case FC_BSG_RPT_ELS:
        case FC_BSG_HST_ELS_NOLOGIN:
                return zfcp_fc_exec_els_job(job, adapter);
        case FC_BSG_RPT_CT:
        case FC_BSG_HST_CT:
                return zfcp_fc_exec_ct_job(job, adapter);
        default:
                return -EINVAL;
        }
}
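/**
 * zfcp_fc_timeout_bsg_job - handle a BSG job timeout
 * @job: the timed-out bsg_job
 *
 * The hardware tracks the timeout itself, so -EAGAIN is returned to have
 * the BSG timeout reset rather than interfering with the request.
 */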
int zfcp_fc_timeout_bsg_job(struct bsg_job *job)
{
        /* hardware tracks timeout, reset bsg timeout to not interfere */
        return -EAGAIN;
}
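/**
 * zfcp_fc_gs_setup - allocate and initialize the well-known-address ports
 * @adapter: adapter for which the management, time, directory, and alias
 *           service ports are set up
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */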
int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
        struct zfcp_fc_wka_ports *wka_ports;

        wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
        if (!wka_ports)
                return -ENOMEM;

        adapter->gs = wka_ports;
        zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
        zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
        zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
        zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);

        return 0;
}

void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
{
        kfree(adapter->gs);
        adapter->gs = NULL;
}