- // SPDX-License-Identifier: GPL-2.0
- /*
- * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
- */
- #include "efct_driver.h"
- #include "efct_hw.h"
- #include "efct_unsol.h"
- struct efct_hw_link_stat_cb_arg {
- void (*cb)(int status, u32 num_counters,
- struct efct_hw_link_stat_counts *counters, void *arg);
- void *arg;
- };
- struct efct_hw_host_stat_cb_arg {
- void (*cb)(int status, u32 num_counters,
- struct efct_hw_host_stat_counts *counters, void *arg);
- void *arg;
- };
- struct efct_hw_fw_wr_cb_arg {
- void (*cb)(int status, u32 bytes_written, u32 change_status, void *arg);
- void *arg;
- };
- struct efct_mbox_rqst_ctx {
- int (*callback)(struct efc *efc, int status, u8 *mqe, void *arg);
- void *arg;
- };
- static int
- efct_hw_link_event_init(struct efct_hw *hw)
- {
- hw->link.status = SLI4_LINK_STATUS_MAX;
- hw->link.topology = SLI4_LINK_TOPO_NONE;
- hw->link.medium = SLI4_LINK_MEDIUM_MAX;
- hw->link.speed = 0;
- hw->link.loop_map = NULL;
- hw->link.fc_id = U32_MAX;
- return 0;
- }
- static int
- efct_hw_read_max_dump_size(struct efct_hw *hw)
- {
- u8 buf[SLI4_BMBX_SIZE];
- struct efct *efct = hw->os;
- int rc = 0;
- struct sli4_rsp_cmn_set_dump_location *rsp;
- /* attempt to determine the dump size for function 0 only. */
- if (PCI_FUNC(efct->pci->devfn) != 0)
- return rc;
- if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0))
- return -EIO;
- rsp = (struct sli4_rsp_cmn_set_dump_location *)
- (buf + offsetof(struct sli4_cmd_sli_config, payload.embed));
- rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
- if (rc != 0) {
- efc_log_debug(hw->os, "set dump location cmd failed\n");
- return rc;
- }
- hw->dump_size =
- le32_to_cpu(rsp->buffer_length_dword) & SLI4_CMN_SET_DUMP_BUFFER_LEN;
- efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size);
- return rc;
- }
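- /*
- * Completion handler for the READ_TOPOLOGY mailbox command issued on a
- * loop link-up: decode the link status, topology, acquired AL_PA and
- * speed, then report the discovered domain to libefc.
- */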
- static int
- __efct_read_topology_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
- {
- struct sli4_cmd_read_topology *read_topo =
- (struct sli4_cmd_read_topology *)mqe;
- u8 speed;
- struct efc_domain_record drec = {0};
- struct efct *efct = hw->os;
- if (status || le16_to_cpu(read_topo->hdr.status)) {
- efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
- le16_to_cpu(read_topo->hdr.status));
- return -EIO;
- }
- switch (le32_to_cpu(read_topo->dw2_attentype) &
- SLI4_READTOPO_ATTEN_TYPE) {
- case SLI4_READ_TOPOLOGY_LINK_UP:
- hw->link.status = SLI4_LINK_STATUS_UP;
- break;
- case SLI4_READ_TOPOLOGY_LINK_DOWN:
- hw->link.status = SLI4_LINK_STATUS_DOWN;
- break;
- case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
- hw->link.status = SLI4_LINK_STATUS_NO_ALPA;
- break;
- default:
- hw->link.status = SLI4_LINK_STATUS_MAX;
- break;
- }
- switch (read_topo->topology) {
- case SLI4_READ_TOPO_NON_FC_AL:
- hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL;
- break;
- case SLI4_READ_TOPO_FC_AL:
- hw->link.topology = SLI4_LINK_TOPO_FC_AL;
- if (hw->link.status == SLI4_LINK_STATUS_UP)
- hw->link.loop_map = hw->loop_map.virt;
- hw->link.fc_id = read_topo->acquired_al_pa;
- break;
- default:
- hw->link.topology = SLI4_LINK_TOPO_MAX;
- break;
- }
- hw->link.medium = SLI4_LINK_MEDIUM_FC;
- speed = (le32_to_cpu(read_topo->currlink_state) &
- SLI4_READTOPO_LINKSTATE_SPEED) >> 8;
- switch (speed) {
- case SLI4_READ_TOPOLOGY_SPEED_1G:
- hw->link.speed = 1 * 1000;
- break;
- case SLI4_READ_TOPOLOGY_SPEED_2G:
- hw->link.speed = 2 * 1000;
- break;
- case SLI4_READ_TOPOLOGY_SPEED_4G:
- hw->link.speed = 4 * 1000;
- break;
- case SLI4_READ_TOPOLOGY_SPEED_8G:
- hw->link.speed = 8 * 1000;
- break;
- case SLI4_READ_TOPOLOGY_SPEED_16G:
- hw->link.speed = 16 * 1000;
- break;
- case SLI4_READ_TOPOLOGY_SPEED_32G:
- hw->link.speed = 32 * 1000;
- break;
- case SLI4_READ_TOPOLOGY_SPEED_64G:
- hw->link.speed = 64 * 1000;
- break;
- case SLI4_READ_TOPOLOGY_SPEED_128G:
- hw->link.speed = 128 * 1000;
- break;
- }
- drec.speed = hw->link.speed;
- drec.fc_id = hw->link.fc_id;
- drec.is_nport = true;
- efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, &drec);
- return 0;
- }
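- /*
- * SLI link event callback: record the new link state and notify libefc
- * of link up (N_Port or loop) and link down transitions.
- */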
- static int
- efct_hw_cb_link(void *ctx, void *e)
- {
- struct efct_hw *hw = ctx;
- struct sli4_link_event *event = e;
- struct efc_domain *d = NULL;
- int rc = 0;
- struct efct *efct = hw->os;
- efct_hw_link_event_init(hw);
- switch (event->status) {
- case SLI4_LINK_STATUS_UP:
- hw->link = *event;
- efct->efcport->link_status = EFC_LINK_STATUS_UP;
- if (event->topology == SLI4_LINK_TOPO_NON_FC_AL) {
- struct efc_domain_record drec = {0};
- efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n",
- event->speed);
- drec.speed = event->speed;
- drec.fc_id = event->fc_id;
- drec.is_nport = true;
- efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND,
- &drec);
- } else if (event->topology == SLI4_LINK_TOPO_FC_AL) {
- u8 buf[SLI4_BMBX_SIZE];
- efc_log_info(hw->os, "Link Up, LOOP, speed is %d\n",
- event->speed);
- if (!sli_cmd_read_topology(&hw->sli, buf,
- &hw->loop_map)) {
- rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT,
- __efct_read_topology_cb, NULL);
- }
- if (rc)
- efc_log_debug(hw->os, "READ_TOPOLOGY failed\n");
- } else {
- efc_log_info(hw->os, "%s(%#x), speed is %d\n",
- "Link Up, unsupported topology ",
- event->topology, event->speed);
- }
- break;
- case SLI4_LINK_STATUS_DOWN:
- efc_log_info(hw->os, "Link down\n");
- hw->link.status = event->status;
- efct->efcport->link_status = EFC_LINK_STATUS_DOWN;
- d = efct->efcport->domain;
- if (d)
- efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d);
- break;
- default:
- efc_log_debug(hw->os, "unhandled link status %#x\n",
- event->status);
- break;
- }
- return 0;
- }
- int
- efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev)
- {
- u32 i, max_sgl, cpus;
- if (hw->hw_setup_called)
- return 0;
- /*
- * efct_hw_init() relies on NULL pointers indicating that a structure
- * needs allocation. If a structure is non-NULL, efct_hw_init() won't
- * free/realloc that memory.
- */
- memset(hw, 0, sizeof(struct efct_hw));
- hw->hw_setup_called = true;
- hw->os = os;
- mutex_init(&hw->bmbx_lock);
- spin_lock_init(&hw->cmd_lock);
- INIT_LIST_HEAD(&hw->cmd_head);
- INIT_LIST_HEAD(&hw->cmd_pending);
- hw->cmd_head_count = 0;
- /* Create mailbox command ctx pool */
- hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
- sizeof(struct efct_command_ctx));
- if (!hw->cmd_ctx_pool) {
- efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n");
- return -EIO;
- }
- /* Create mailbox request ctx pool for library callback */
- hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
- sizeof(struct efct_mbox_rqst_ctx));
- if (!hw->mbox_rqst_pool) {
- efc_log_err(hw->os, "failed to allocate mbox request pool\n");
- return -EIO;
- }
- spin_lock_init(&hw->io_lock);
- INIT_LIST_HEAD(&hw->io_inuse);
- INIT_LIST_HEAD(&hw->io_free);
- INIT_LIST_HEAD(&hw->io_wait_free);
- atomic_set(&hw->io_alloc_failed_count, 0);
- hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4;
- if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) {
- efc_log_err(hw->os, "SLI setup failed\n");
- return -EIO;
- }
- efct_hw_link_event_init(hw);
- sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw);
- /*
- * Set all the queue sizes to the maximum allowed.
- */
- for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++)
- hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i];
- /*
- * Adjust the size of the WQs so that the CQ is twice as big as
- * the WQ to allow for 2 completions per IO. This allows us to
- * handle multi-phase as well as aborts.
- */
- hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2;
- /*
- * The RQ assignment for RQ pair mode.
- */
- hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD;
- hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size;
- cpus = num_possible_cpus();
- hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus;
- max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED;
- max_sgl = (max_sgl > EFCT_FC_MAX_SGL) ? EFCT_FC_MAX_SGL : max_sgl;
- hw->config.n_sgl = max_sgl;
- (void)efct_hw_read_max_dump_size(hw);
- return 0;
- }
- static void
- efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id)
- {
- efc_log_info(hw->os,
- "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
- j, hw->config.filter_def[j], i, id);
- }
- static inline void
- efct_hw_init_free_io(struct efct_hw_io *io)
- {
- /*
- * Set io->done to NULL, to avoid any callbacks, should
- * a completion be received for one of these IOs
- */
- io->done = NULL;
- io->abort_done = NULL;
- io->status_saved = false;
- io->abort_in_progress = false;
- io->type = 0xFFFF;
- io->wq = NULL;
- }
- static bool efct_hw_iotype_is_originator(u16 io_type)
- {
- switch (io_type) {
- case EFCT_HW_FC_CT:
- case EFCT_HW_ELS_REQ:
- return true;
- default:
- return false;
- }
- }
- static void
- efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io)
- {
- /* Restore the default */
- io->sgl = &io->def_sgl;
- io->sgl_count = io->def_sgl_count;
- }
- static void
- efct_hw_wq_process_io(void *arg, u8 *cqe, int status)
- {
- struct efct_hw_io *io = arg;
- struct efct_hw *hw = io->hw;
- struct sli4_fc_wcqe *wcqe = (void *)cqe;
- u32 len = 0;
- u32 ext = 0;
- /* clear xbusy flag if WCQE[XB] is clear */
- if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0)
- io->xbusy = false;
- /* get extended CQE status */
- switch (io->type) {
- case EFCT_HW_BLS_ACC:
- case EFCT_HW_BLS_RJT:
- break;
- case EFCT_HW_ELS_REQ:
- sli_fc_els_did(&hw->sli, cqe, &ext);
- len = sli_fc_response_length(&hw->sli, cqe);
- break;
- case EFCT_HW_ELS_RSP:
- case EFCT_HW_FC_CT_RSP:
- break;
- case EFCT_HW_FC_CT:
- len = sli_fc_response_length(&hw->sli, cqe);
- break;
- case EFCT_HW_IO_TARGET_WRITE:
- len = sli_fc_io_length(&hw->sli, cqe);
- break;
- case EFCT_HW_IO_TARGET_READ:
- len = sli_fc_io_length(&hw->sli, cqe);
- break;
- case EFCT_HW_IO_TARGET_RSP:
- break;
- case EFCT_HW_IO_DNRX_REQUEUE:
- /* release the count for re-posting the buffer */
- /* efct_hw_io_free(hw, io); */
- break;
- default:
- efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n",
- io->type, io->indicator);
- break;
- }
- if (status) {
- ext = sli_fc_ext_status(&hw->sli, cqe);
- /*
- * If this is an originator IO and XB is set, then issue an
- * abort for the IO from within the HW
- */
- if (efct_hw_iotype_is_originator(io->type) &&
- wcqe->flags & SLI4_WCQE_XB) {
- int rc;
- efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
- io->indicator, io->reqtag);
- /*
- * Because targets may send a response when the IO
- * completes using the same XRI, we must wait for the
- * XRI_ABORTED CQE to issue the IO callback
- */
- rc = efct_hw_io_abort(hw, io, false, NULL, NULL);
- if (rc == 0) {
- /*
- * latch status to return after abort is
- * complete
- */
- io->status_saved = true;
- io->saved_status = status;
- io->saved_ext = ext;
- io->saved_len = len;
- goto exit_efct_hw_wq_process_io;
- } else if (rc == -EINPROGRESS) {
- /*
- * Already being aborted by someone else (ABTS
- * perhaps). Just return original
- * error.
- */
- efc_log_debug(hw->os, "%s%#x tag=%#x\n",
- "abort in progress xri=",
- io->indicator, io->reqtag);
- } else {
- /* Failed to abort for some other reason, log
- * error
- */
- efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n",
- "Failed to abort xri=",
- io->indicator, io->reqtag, rc);
- }
- }
- }
- if (io->done) {
- efct_hw_done_t done = io->done;
- io->done = NULL;
- if (io->status_saved) {
- /* use latched status if exists */
- status = io->saved_status;
- len = io->saved_len;
- ext = io->saved_ext;
- io->status_saved = false;
- }
- /* Restore default SGL */
- efct_hw_io_restore_sgl(hw, io);
- done(io, len, status, ext, io->arg);
- }
- exit_efct_hw_wq_process_io:
- return;
- }
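- /*
- * Allocate the HW IO objects, their WQE buffers and per-IO default SGLs
- * (DMA coherent), and assign an XRI and request tag to each. On a
- * re-initialization the existing IOs and SGLs are reused.
- */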
- static int
- efct_hw_setup_io(struct efct_hw *hw)
- {
- u32 i = 0;
- struct efct_hw_io *io = NULL;
- uintptr_t xfer_virt = 0;
- uintptr_t xfer_phys = 0;
- u32 index;
- bool new_alloc = true;
- struct efc_dma *dma;
- struct efct *efct = hw->os;
- if (!hw->io) {
- hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL);
- if (!hw->io)
- return -ENOMEM;
- memset(hw->io, 0, hw->config.n_io * sizeof(io));
- for (i = 0; i < hw->config.n_io; i++) {
- hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL);
- if (!hw->io[i])
- goto error;
- }
- /* Create WQE buffs for IO */
- hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size),
- GFP_KERNEL);
- if (!hw->wqe_buffs)
- goto error;
- } else {
- /* re-use existing IOs, including SGLs */
- new_alloc = false;
- }
- if (new_alloc) {
- dma = &hw->xfer_rdy;
- dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
- dma->virt = dma_alloc_coherent(&efct->pci->dev,
- dma->size, &dma->phys, GFP_KERNEL);
- if (!dma->virt)
- return -ENOMEM;
- }
- xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
- xfer_phys = hw->xfer_rdy.phys;
- /* Initialize the pool of HW IO objects */
- for (i = 0; i < hw->config.n_io; i++) {
- struct hw_wq_callback *wqcb;
- io = hw->io[i];
- /* initialize IO fields */
- io->hw = hw;
- /* Assign a WQE buff */
- io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size];
- /* Allocate the request tag for this IO */
- wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io);
- if (!wqcb) {
- efc_log_err(hw->os, "can't allocate request tag\n");
- return -ENOSPC;
- }
- io->reqtag = wqcb->instance_index;
- /* Now for the fields that are initialized on each free */
- efct_hw_init_free_io(io);
- /* The XB flag isn't cleared on IO free, so init to zero */
- io->xbusy = 0;
- if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI,
- &io->indicator, &index)) {
- efc_log_err(hw->os,
- "sli_resource_alloc failed @ %d\n", i);
- return -ENOMEM;
- }
- if (new_alloc) {
- dma = &io->def_sgl;
- dma->size = hw->config.n_sgl *
- sizeof(struct sli4_sge);
- dma->virt = dma_alloc_coherent(&efct->pci->dev,
- dma->size, &dma->phys,
- GFP_KERNEL);
- if (!dma->virt) {
- efc_log_err(hw->os, "dma_alloc fail %d\n", i);
- memset(&io->def_sgl, 0,
- sizeof(struct efc_dma));
- return -ENOMEM;
- }
- }
- io->def_sgl_count = hw->config.n_sgl;
- io->sgl = &io->def_sgl;
- io->sgl_count = io->def_sgl_count;
- if (hw->xfer_rdy.size) {
- io->xfer_rdy.virt = (void *)xfer_virt;
- io->xfer_rdy.phys = xfer_phys;
- io->xfer_rdy.size = sizeof(struct fcp_txrdy);
- xfer_virt += sizeof(struct fcp_txrdy);
- xfer_phys += sizeof(struct fcp_txrdy);
- }
- }
- return 0;
- error:
- for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
- kfree(hw->io[i]);
- hw->io[i] = NULL;
- }
- kfree(hw->io);
- hw->io = NULL;
- return -ENOMEM;
- }
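- /*
- * Pre-register the per-IO SGLs with the adapter using POST_SGL_PAGES in
- * batches of up to 256 contiguous XRIs, then add each IO to the free list.
- */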
- static int
- efct_hw_init_prereg_io(struct efct_hw *hw)
- {
- u32 i, idx = 0;
- struct efct_hw_io *io = NULL;
- u8 cmd[SLI4_BMBX_SIZE];
- int rc = 0;
- u32 n_rem;
- u32 n = 0;
- u32 sgls_per_request = 256;
- struct efc_dma **sgls = NULL;
- struct efc_dma req;
- struct efct *efct = hw->os;
- sgls = kmalloc_array(sgls_per_request, sizeof(*sgls), GFP_KERNEL);
- if (!sgls)
- return -ENOMEM;
- memset(&req, 0, sizeof(struct efc_dma));
- req.size = 32 + sgls_per_request * 16;
- req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
- GFP_KERNEL);
- if (!req.virt) {
- kfree(sgls);
- return -ENOMEM;
- }
- for (n_rem = hw->config.n_io; n_rem; n_rem -= n) {
- /* Copy address of SGL's into local sgls[] array, break
- * out if the xri is not contiguous.
- */
- u32 min = (sgls_per_request < n_rem) ? sgls_per_request : n_rem;
- for (n = 0; n < min; n++) {
- /* Check that we have contiguous xri values */
- if (n > 0) {
- if (hw->io[idx + n]->indicator !=
- hw->io[idx + n - 1]->indicator + 1)
- break;
- }
- sgls[n] = hw->io[idx + n]->sgl;
- }
- if (sli_cmd_post_sgl_pages(&hw->sli, cmd,
- hw->io[idx]->indicator, n, sgls, NULL, &req)) {
- rc = -EIO;
- break;
- }
- rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL);
- if (rc) {
- efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc);
- break;
- }
- /* Add to tail if successful */
- for (i = 0; i < n; i++, idx++) {
- io = hw->io[idx];
- io->state = EFCT_HW_IO_STATE_FREE;
- INIT_LIST_HEAD(&io->list_entry);
- list_add_tail(&io->list_entry, &hw->io_free);
- }
- }
- dma_free_coherent(&efct->pci->dev, req.size, req.virt, req.phys);
- memset(&req, 0, sizeof(struct efc_dma));
- kfree(sgls);
- return rc;
- }
- static int
- efct_hw_init_io(struct efct_hw *hw)
- {
- u32 i, idx = 0;
- bool prereg = false;
- struct efct_hw_io *io = NULL;
- int rc = 0;
- prereg = hw->sli.params.sgl_pre_registered;
- if (prereg)
- return efct_hw_init_prereg_io(hw);
- for (i = 0; i < hw->config.n_io; i++, idx++) {
- io = hw->io[idx];
- io->state = EFCT_HW_IO_STATE_FREE;
- INIT_LIST_HEAD(&io->list_entry);
- list_add_tail(&io->list_entry, &hw->io_free);
- }
- return rc;
- }
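- /*
- * Program the FDT transfer hint via a SET_FEATURES mailbox command;
- * callers treat failure as non-fatal on firmware without the feature.
- */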
- static int
- efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint)
- {
- int rc = 0;
- u8 buf[SLI4_BMBX_SIZE];
- struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint param;
- memset(¶m, 0, sizeof(param));
- param.fdt_xfer_hint = cpu_to_le32(fdt_xfer_hint);
- /* build the set_features command */
- sli_cmd_common_set_features(&hw->sli, buf,
- SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), ¶m);
- rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
- if (rc)
- efc_log_warn(hw->os, "set FDT hint %d failed: %d\n",
- fdt_xfer_hint, rc);
- else
- efc_log_info(hw->os, "Set FTD transfer hint to %d\n",
- le32_to_cpu(param.fdt_xfer_hint));
- return rc;
- }
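- /*
- * Register the FCFI in single-RQ (non-MRQ) mode: build the RQ filter
- * table from the configured filter_def values, issue REG_FCFI and save
- * the returned FCF indicator.
- */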
- static int
- efct_hw_config_rq(struct efct_hw *hw)
- {
- u32 min_rq_count, i, rc;
- struct sli4_cmd_rq_cfg rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
- u8 buf[SLI4_BMBX_SIZE];
- efc_log_info(hw->os, "using REG_FCFI standard\n");
- /*
- * Set the filter match/mask values from hw's
- * filter_def values
- */
- for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
- rq_cfg[i].rq_id = cpu_to_le16(0xffff);
- rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i];
- rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
- rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16);
- rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24);
- }
- /*
- * Update the rq_id's of the FCF configuration
- * (don't update more than the number of rq_cfg
- * elements)
- */
- min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ?
- hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG;
- for (i = 0; i < min_rq_count; i++) {
- struct hw_rq *rq = hw->hw_rq[i];
- u32 j;
- for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
- u32 mask = (rq->filter_mask != 0) ?
- rq->filter_mask : 1;
- if (!(mask & (1U << j)))
- continue;
- rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id);
- efct_logfcfi(hw, j, i, rq->hdr->id);
- }
- }
- rc = -EIO;
- if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg))
- rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
- if (rc != 0) {
- efc_log_err(hw->os, "FCFI registration failed\n");
- return rc;
- }
- hw->fcf_indicator =
- le16_to_cpu(((struct sli4_cmd_reg_fcfi *)buf)->fcfi);
- return rc;
- }
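- /*
- * Register the FCFI in multi-RQ mode. In SET_FCFI_MODE the filter setup
- * is skipped and the returned FCFI is saved; in SET_MRQ_MODE the RQ
- * filters and the MRQ bitmask are programmed.
- */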
- static int
- efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index)
- {
- u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
- struct hw_rq *rq;
- struct sli4_cmd_reg_fcfi_mrq *rsp = NULL;
- struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
- u32 rc, i;
- if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
- goto issue_cmd;
- /* Set the filter match/mask values from hw's filter_def values */
- for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
- rq_filter[i].rq_id = cpu_to_le16(0xffff);
- rq_filter[i].type_mask = (u8)hw->config.filter_def[i];
- rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8);
- rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16);
- rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24);
- }
- rq = hw->hw_rq[0];
- rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id);
- rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id);
- mrq_bitmask = 0x2;
- issue_cmd:
- efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n",
- hw->hw_rq_count, hw->config.rq_selection_policy, mode);
- /* Invoke REG_FCFI_MRQ */
- rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index,
- hw->config.rq_selection_policy, mrq_bitmask,
- hw->hw_mrq_count, rq_filter);
- if (rc) {
- efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n");
- return -EIO;
- }
- rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
- rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf;
- if ((rc) || (le16_to_cpu(rsp->hdr.status))) {
- efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n",
- rsp->hdr.command, le16_to_cpu(rsp->hdr.status));
- return -EIO;
- }
- if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
- hw->fcf_indicator = le16_to_cpu(rsp->fcfi);
- return 0;
- }
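- /*
- * Add a queue id-to-index mapping to the lookup hash. The hash uses open
- * addressing with linear probing and is sized (EFCT_HW_Q_HASH_SIZE) to be
- * larger than the number of queues.
- */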
- static void
- efct_hw_queue_hash_add(struct efct_queue_hash *hash,
- u16 id, u16 index)
- {
- u32 hash_index = id & (EFCT_HW_Q_HASH_SIZE - 1);
- /*
- * Since the hash is always bigger than the number of queues, we never
- * have to worry about an infinite loop.
- */
- while (hash[hash_index].in_use)
- hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
- /* not used, claim the entry */
- hash[hash_index].id = id;
- hash[hash_index].in_use = true;
- hash[hash_index].index = index;
- }
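- /*
- * Configure the SLI port health check feature via a SET_FEATURES mailbox
- * command; the query and enable flags select the requested action.
- */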
- static int
- efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query, u8 enable)
- {
- int rc = 0;
- u8 buf[SLI4_BMBX_SIZE];
- struct sli4_rqst_cmn_set_features_health_check param;
- u32 health_check_flag = 0;
- memset(¶m, 0, sizeof(param));
- if (enable)
- health_check_flag |= SLI4_RQ_HEALTH_CHECK_ENABLE;
- if (query)
- health_check_flag |= SLI4_RQ_HEALTH_CHECK_QUERY;
- param.health_check_dword = cpu_to_le32(health_check_flag);
- /* build the set_features command */
- sli_cmd_common_set_features(&hw->sli, buf,
- SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), ¶m);
- rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
- if (rc)
- efc_log_err(hw->os, "efct_hw_command returns %d\n", rc);
- else
- efc_log_debug(hw->os, "SLI Port Health Check is enabled\n");
- return rc;
- }
- int
- efct_hw_init(struct efct_hw *hw)
- {
- int rc;
- u32 i = 0;
- int rem_count;
- unsigned long flags = 0;
- struct efct_hw_io *temp;
- struct efc_dma *dma;
- /*
- * Make sure the command lists are empty. If this is start-of-day,
- * they'll be empty since they were just initialized in efct_hw_setup.
- * If we've just gone through a reset, the command and command pending
- * lists should have been cleaned up as part of the reset
- * (efct_hw_reset()).
- */
- spin_lock_irqsave(&hw->cmd_lock, flags);
- if (!list_empty(&hw->cmd_head)) {
- spin_unlock_irqrestore(&hw->cmd_lock, flags);
- efc_log_err(hw->os, "command found on cmd list\n");
- return -EIO;
- }
- if (!list_empty(&hw->cmd_pending)) {
- spin_unlock_irqrestore(&hw->cmd_lock, flags);
- efc_log_err(hw->os, "command found on pending list\n");
- return -EIO;
- }
- spin_unlock_irqrestore(&hw->cmd_lock, flags);
- /* Free RQ buffers if previously allocated */
- efct_hw_rx_free(hw);
- /*
- * The IO queues must be initialized here for the reset case. The
- * efct_hw_init_io() function will re-add the IOs to the free list.
- * The cmd_head list should be OK since we free all entries in
- * efct_hw_command_cancel() that is called in the efct_hw_reset().
- */
- /* If we are in this function due to a reset, there may be stale items
- * on lists that need to be removed. Clean them up.
- */
- rem_count = 0;
- while ((!list_empty(&hw->io_wait_free))) {
- rem_count++;
- temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io,
- list_entry);
- list_del_init(&temp->list_entry);
- }
- if (rem_count > 0)
- efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n",
- rem_count);
- rem_count = 0;
- while ((!list_empty(&hw->io_inuse))) {
- rem_count++;
- temp = list_first_entry(&hw->io_inuse, struct efct_hw_io,
- list_entry);
- list_del_init(&temp->list_entry);
- }
- if (rem_count > 0)
- efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n",
- rem_count);
- rem_count = 0;
- while ((!list_empty(&hw->io_free))) {
- rem_count++;
- temp = list_first_entry(&hw->io_free, struct efct_hw_io,
- list_entry);
- list_del_init(&temp->list_entry);
- }
- if (rem_count > 0)
- efc_log_debug(hw->os, "rmvd %d items from io_free list\n",
- rem_count);
- /* If MRQ is not required, make sure we don't request the feature. */
- if (hw->config.n_rq == 1)
- hw->sli.features &= (~SLI4_REQFEAT_MRQP);
- if (sli_init(&hw->sli)) {
- efc_log_err(hw->os, "SLI failed to initialize\n");
- return -EIO;
- }
- if (hw->sliport_healthcheck) {
- rc = efct_hw_config_sli_port_health_check(hw, 0, 1);
- if (rc != 0) {
- efc_log_err(hw->os, "Enable port Health check fail\n");
- return rc;
- }
- }
- /*
- * Set FDT transfer hint, only works on Lancer
- */
- if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) {
- /*
- * Non-fatal error. In particular, we can disregard failure to
- * set EFCT_HW_FDT_XFER_HINT on devices with legacy firmware
- * that do not support the EFCT_HW_FDT_XFER_HINT feature.
- */
- efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT);
- }
- /* zero the hashes */
- memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
- efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
- EFCT_HW_MAX_NUM_CQ, EFCT_HW_Q_HASH_SIZE);
- memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
- efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
- EFCT_HW_MAX_NUM_RQ, EFCT_HW_Q_HASH_SIZE);
- memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
- efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
- EFCT_HW_MAX_NUM_WQ, EFCT_HW_Q_HASH_SIZE);
- rc = efct_hw_init_queues(hw);
- if (rc)
- return rc;
- rc = efct_hw_map_wq_cpu(hw);
- if (rc)
- return rc;
- /* Allocate and post RQ buffers */
- rc = efct_hw_rx_allocate(hw);
- if (rc) {
- efc_log_err(hw->os, "rx_allocate failed\n");
- return rc;
- }
- rc = efct_hw_rx_post(hw);
- if (rc) {
- efc_log_err(hw->os, "WARNING - error posting RQ buffers\n");
- return rc;
- }
- if (hw->config.n_eq == 1) {
- rc = efct_hw_config_rq(hw);
- if (rc) {
- efc_log_err(hw->os, "config rq failed %d\n", rc);
- return rc;
- }
- } else {
- rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0);
- if (rc != 0) {
- efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n");
- return rc;
- }
- rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0);
- if (rc != 0) {
- efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n");
- return rc;
- }
- }
- /*
- * Allocate the WQ request tag pool, if not previously allocated
- * (the request tag value is 16 bits, thus the pool allocation size
- * of 64k)
- */
- hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw);
- if (!hw->wq_reqtag_pool) {
- efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n");
- return -ENOMEM;
- }
- rc = efct_hw_setup_io(hw);
- if (rc) {
- efc_log_err(hw->os, "IO allocation failure\n");
- return rc;
- }
- rc = efct_hw_init_io(hw);
- if (rc) {
- efc_log_err(hw->os, "IO initialization failure\n");
- return rc;
- }
- dma = &hw->loop_map;
- dma->size = SLI4_MIN_LOOP_MAP_BYTES;
- dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
- GFP_KERNEL);
- if (!dma->virt)
- return -EIO;
- /*
- * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ
- * entries
- */
- for (i = 0; i < hw->eq_count; i++)
- sli_queue_arm(&hw->sli, &hw->eq[i], true);
- /*
- * Initialize RQ hash
- */
- for (i = 0; i < hw->rq_count; i++)
- efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
- /*
- * Initialize WQ hash
- */
- for (i = 0; i < hw->wq_count; i++)
- efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
- /*
- * Arming the CQ allows (e.g.) MQ completions to write CQ entries
- */
- for (i = 0; i < hw->cq_count; i++) {
- efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
- sli_queue_arm(&hw->sli, &hw->cq[i], true);
- }
- /* Set RQ process limit*/
- for (i = 0; i < hw->hw_rq_count; i++) {
- struct hw_rq *rq = hw->hw_rq[i];
- hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2;
- }
- /* record the fact that the queues are functional */
- hw->state = EFCT_HW_STATE_ACTIVE;
- /*
- * Allocate a HW IO for send frame.
- */
- hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw);
- if (!hw->hw_wq[0]->send_frame_io)
- efc_log_err(hw->os, "alloc for send_frame_io failed\n");
- /* Initialize send frame sequence id */
- atomic_set(&hw->send_frame_seq_id, 0);
- return 0;
- }
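- /*
- * Parse a comma-separated list of numeric filter definitions into
- * hw->config.filter_def[]; each 32-bit value packs the R_CTL/TYPE match
- * and mask bytes used when registering the FCFI.
- */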
- int
- efct_hw_parse_filter(struct efct_hw *hw, void *value)
- {
- int rc = 0;
- char *p = NULL;
- char *token;
- u32 idx = 0;
- for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++)
- hw->config.filter_def[idx] = 0;
- p = kstrdup(value, GFP_KERNEL);
- if (!p || !*p) {
- efc_log_err(hw->os, "empty filter string\n");
- kfree(p);
- return -ENOMEM;
- }
- idx = 0;
- while ((token = strsep(&p, ",")) && *token) {
- if (kstrtou32(token, 0, &hw->config.filter_def[idx++]))
- efc_log_err(hw->os, "kstrtoint failed\n");
- if (!p || !*p)
- break;
- if (idx == ARRAY_SIZE(hw->config.filter_def))
- break;
- }
- kfree(p);
- return rc;
- }
- u64
- efct_get_wwnn(struct efct_hw *hw)
- {
- struct sli4 *sli = &hw->sli;
- u8 p[8];
- memcpy(p, sli->wwnn, sizeof(p));
- return get_unaligned_be64(p);
- }
- u64
- efct_get_wwpn(struct efct_hw *hw)
- {
- struct sli4 *sli = &hw->sli;
- u8 p[8];
- memcpy(p, sli->wwpn, sizeof(p));
- return get_unaligned_be64(p);
- }
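- /*
- * Allocate 'count' RQ buffer descriptors, each backed by a DMA-coherent
- * buffer of 'size' bytes; used for both header and payload RQ buffers.
- */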
- static struct efc_hw_rq_buffer *
- efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
- u32 size)
- {
- struct efct *efct = hw->os;
- struct efc_hw_rq_buffer *rq_buf = NULL;
- struct efc_hw_rq_buffer *prq;
- u32 i;
- if (!count)
- return NULL;
- rq_buf = kmalloc_array(count, sizeof(*rq_buf), GFP_KERNEL);
- if (!rq_buf)
- return NULL;
- memset(rq_buf, 0, sizeof(*rq_buf) * count);
- for (i = 0, prq = rq_buf; i < count; i ++, prq++) {
- prq->rqindex = rqindex;
- prq->dma.size = size;
- prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
- prq->dma.size,
- &prq->dma.phys,
- GFP_KERNEL);
- if (!prq->dma.virt) {
- efc_log_err(hw->os, "DMA allocation failed\n");
- kfree(rq_buf);
- return NULL;
- }
- }
- return rq_buf;
- }
- static void
- efct_hw_rx_buffer_free(struct efct_hw *hw,
- struct efc_hw_rq_buffer *rq_buf,
- u32 count)
- {
- struct efct *efct = hw->os;
- u32 i;
- struct efc_hw_rq_buffer *prq;
- if (rq_buf) {
- for (i = 0, prq = rq_buf; i < count; i++, prq++) {
- dma_free_coherent(&efct->pci->dev,
- prq->dma.size, prq->dma.virt,
- prq->dma.phys);
- memset(&prq->dma, 0, sizeof(struct efc_dma));
- }
- kfree(rq_buf);
- }
- }
- int
- efct_hw_rx_allocate(struct efct_hw *hw)
- {
- struct efct *efct = hw->os;
- u32 i;
- int rc = 0;
- u32 rqindex = 0;
- u32 hdr_size = EFCT_HW_RQ_SIZE_HDR;
- u32 payload_size = hw->config.rq_default_buffer_size;
- rqindex = 0;
- for (i = 0; i < hw->hw_rq_count; i++) {
- struct hw_rq *rq = hw->hw_rq[i];
- /* Allocate header buffers */
- rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
- rq->entry_count,
- hdr_size);
- if (!rq->hdr_buf) {
- efc_log_err(efct, "rx_buffer_alloc hdr_buf failed\n");
- rc = -EIO;
- break;
- }
- efc_log_debug(hw->os,
- "rq[%2d] rq_id %02d header %4d by %4d bytes\n",
- i, rq->hdr->id, rq->entry_count, hdr_size);
- rqindex++;
- /* Allocate payload buffers */
- rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
- rq->entry_count,
- payload_size);
- if (!rq->payload_buf) {
- efc_log_err(efct, "rx_buffer_alloc fb_buf failed\n");
- rc = -EIO;
- break;
- }
- efc_log_debug(hw->os,
- "rq[%2d] rq_id %02d default %4d by %4d bytes\n",
- i, rq->data->id, rq->entry_count, payload_size);
- rqindex++;
- }
- return rc ? -EIO : 0;
- }
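- /*
- * Allocate the sequence object pool on first use and post the RQ
- * header/payload buffer pairs to the hardware.
- */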
- int
- efct_hw_rx_post(struct efct_hw *hw)
- {
- u32 i;
- u32 idx;
- u32 rq_idx;
- int rc = 0;
- if (!hw->seq_pool) {
- u32 count = 0;
- for (i = 0; i < hw->hw_rq_count; i++)
- count += hw->hw_rq[i]->entry_count;
- hw->seq_pool = kmalloc_array(count,
- sizeof(struct efc_hw_sequence), GFP_KERNEL);
- if (!hw->seq_pool)
- return -ENOMEM;
- }
- /*
- * In RQ pair mode, we MUST post the header and payload buffer at the
- * same time.
- */
- for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
- struct hw_rq *rq = hw->hw_rq[rq_idx];
- for (i = 0; i < rq->entry_count - 1; i++) {
- struct efc_hw_sequence *seq;
- seq = hw->seq_pool + idx;
- idx++;
- seq->header = &rq->hdr_buf[i];
- seq->payload = &rq->payload_buf[i];
- rc = efct_hw_sequence_free(hw, seq);
- if (rc)
- break;
- }
- if (rc)
- break;
- }
- if (rc && hw->seq_pool)
- kfree(hw->seq_pool);
- return rc;
- }
- void
- efct_hw_rx_free(struct efct_hw *hw)
- {
- u32 i;
- /* Free hw_rq buffers */
- for (i = 0; i < hw->hw_rq_count; i++) {
- struct hw_rq *rq = hw->hw_rq[i];
- if (rq) {
- efct_hw_rx_buffer_free(hw, rq->hdr_buf,
- rq->entry_count);
- rq->hdr_buf = NULL;
- efct_hw_rx_buffer_free(hw, rq->payload_buf,
- rq->entry_count);
- rq->payload_buf = NULL;
- }
- }
- }
- static int
- efct_hw_cmd_submit_pending(struct efct_hw *hw)
- {
- int rc = 0;
- /* Assumes lock held */
- /* Only submit MQE if there's room */
- while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) &&
- !list_empty(&hw->cmd_pending)) {
- struct efct_command_ctx *ctx;
- ctx = list_first_entry(&hw->cmd_pending,
- struct efct_command_ctx, list_entry);
- if (!ctx)
- break;
- list_del_init(&ctx->list_entry);
- list_add_tail(&ctx->list_entry, &hw->cmd_head);
- hw->cmd_head_count++;
- if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) {
- efc_log_debug(hw->os,
- "sli_queue_write failed: %d\n", rc);
- rc = -EIO;
- break;
- }
- }
- return rc;
- }
- int
- efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
- {
- int rc = -EIO;
- unsigned long flags = 0;
- void *bmbx = NULL;
- /*
- * If the chip is in an error state (UE'd) then reject this mailbox
- * command.
- */
- if (sli_fw_error_status(&hw->sli) > 0) {
- efc_log_crit(hw->os, "Chip in an error state - reset needed\n");
- efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
- sli_reg_read_status(&hw->sli),
- sli_reg_read_err1(&hw->sli),
- sli_reg_read_err2(&hw->sli));
- return -EIO;
- }
- /*
- * Send a mailbox command to the hardware, and either wait for
- * a completion (EFCT_CMD_POLL) or get an optional asynchronous
- * completion (EFCT_CMD_NOWAIT).
- */
- if (opts == EFCT_CMD_POLL) {
- mutex_lock(&hw->bmbx_lock);
- bmbx = hw->sli.bmbx.virt;
- memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
- if (sli_bmbx_command(&hw->sli) == 0) {
- rc = 0;
- memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
- }
- mutex_unlock(&hw->bmbx_lock);
- } else if (opts == EFCT_CMD_NOWAIT) {
- struct efct_command_ctx *ctx = NULL;
- if (hw->state != EFCT_HW_STATE_ACTIVE) {
- efc_log_err(hw->os, "Can't send command, HW state=%d\n",
- hw->state);
- return -EIO;
- }
- ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC);
- if (!ctx)
- return -ENOSPC;
- memset(ctx, 0, sizeof(struct efct_command_ctx));
- if (cb) {
- ctx->cb = cb;
- ctx->arg = arg;
- }
- memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE);
- ctx->ctx = hw;
- spin_lock_irqsave(&hw->cmd_lock, flags);
- /* Add to pending list */
- INIT_LIST_HEAD(&ctx->list_entry);
- list_add_tail(&ctx->list_entry, &hw->cmd_pending);
- /* Submit as much of the pending list as we can */
- rc = efct_hw_cmd_submit_pending(hw);
- spin_unlock_irqrestore(&hw->cmd_lock, flags);
- }
- return rc;
- }
- static int
- efct_hw_command_process(struct efct_hw *hw, int status, u8 *mqe,
- size_t size)
- {
- struct efct_command_ctx *ctx = NULL;
- unsigned long flags = 0;
- spin_lock_irqsave(&hw->cmd_lock, flags);
- if (!list_empty(&hw->cmd_head)) {
- ctx = list_first_entry(&hw->cmd_head,
- struct efct_command_ctx, list_entry);
- list_del_init(&ctx->list_entry);
- }
- if (!ctx) {
- efc_log_err(hw->os, "no command context\n");
- spin_unlock_irqrestore(&hw->cmd_lock, flags);
- return -EIO;
- }
- hw->cmd_head_count--;
- /* Post any pending requests */
- efct_hw_cmd_submit_pending(hw);
- spin_unlock_irqrestore(&hw->cmd_lock, flags);
- if (ctx->cb) {
- memcpy(ctx->buf, mqe, size);
- ctx->cb(hw, status, ctx->buf, ctx->arg);
- }
- mempool_free(ctx, hw->cmd_ctx_pool);
- return 0;
- }
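- /*
- * Read a completion entry from the MQ and complete the corresponding
- * command context via efct_hw_command_process().
- */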
- static int
- efct_hw_mq_process(struct efct_hw *hw,
- int status, struct sli4_queue *mq)
- {
- u8 mqe[SLI4_BMBX_SIZE];
- int rc;
- rc = sli_mq_read(&hw->sli, mq, mqe);
- if (!rc)
- rc = efct_hw_command_process(hw, status, mqe, mq->size);
- return rc;
- }
- static int
- efct_hw_command_cancel(struct efct_hw *hw)
- {
- unsigned long flags = 0;
- int rc = 0;
- spin_lock_irqsave(&hw->cmd_lock, flags);
- /*
- * Manually clean up remaining commands. Note: since this calls
- * efct_hw_command_process(), we'll also process the cmd_pending
- * list, so no need to manually clean that out.
- */
- while (!list_empty(&hw->cmd_head)) {
- u8 mqe[SLI4_BMBX_SIZE] = { 0 };
- struct efct_command_ctx *ctx;
- ctx = list_first_entry(&hw->cmd_head,
- struct efct_command_ctx, list_entry);
- efc_log_debug(hw->os, "hung command %08x\n",
- !ctx ? U32_MAX : *((u32 *)ctx->buf));
- spin_unlock_irqrestore(&hw->cmd_lock, flags);
- rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE);
- spin_lock_irqsave(&hw->cmd_lock, flags);
- }
- spin_unlock_irqrestore(&hw->cmd_lock, flags);
- return rc;
- }
- static void
- efct_mbox_rsp_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
- {
- struct efct_mbox_rqst_ctx *ctx = arg;
- if (ctx) {
- if (ctx->callback)
- (*ctx->callback)(hw->os->efcport, status, mqe,
- ctx->arg);
- mempool_free(ctx, hw->mbox_rqst_pool);
- }
- }
- int
- efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg)
- {
- struct efct_mbox_rqst_ctx *ctx;
- struct efct *efct = base;
- struct efct_hw *hw = &efct->hw;
- int rc;
- /*
- * Allocate a callback context (which includes the mbox cmd buffer);
- * we need this to be persistent as the mbox cmd submission may be
- * queued and executed later.
- */
- ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC);
- if (!ctx)
- return -EIO;
- ctx->callback = cb;
- ctx->arg = arg;
- rc = efct_hw_command(hw, cmd, EFCT_CMD_NOWAIT, efct_mbox_rsp_cb, ctx);
- if (rc) {
- efc_log_err(efct, "issue mbox rqst failure rc:%d\n", rc);
- mempool_free(ctx, hw->mbox_rqst_pool);
- return -EIO;
- }
- return 0;
- }
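- /*
- * Pop an IO from the free list, move it to the in-use list and bind it
- * to the WQ mapped to the current CPU (falling back to hw_wq[0]).
- * Called with io_lock held.
- */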
- static inline struct efct_hw_io *
- _efct_hw_io_alloc(struct efct_hw *hw)
- {
- struct efct_hw_io *io = NULL;
- if (!list_empty(&hw->io_free)) {
- io = list_first_entry(&hw->io_free, struct efct_hw_io,
- list_entry);
- list_del(&io->list_entry);
- }
- if (io) {
- INIT_LIST_HEAD(&io->list_entry);
- list_add_tail(&io->list_entry, &hw->io_inuse);
- io->state = EFCT_HW_IO_STATE_INUSE;
- io->abort_reqtag = U32_MAX;
- io->wq = hw->wq_cpu_array[raw_smp_processor_id()];
- if (!io->wq) {
- efc_log_err(hw->os, "WQ not assigned for cpu:%d\n",
- raw_smp_processor_id());
- io->wq = hw->hw_wq[0];
- }
- kref_init(&io->ref);
- io->release = efct_hw_io_free_internal;
- } else {
- atomic_add(1, &hw->io_alloc_failed_count);
- }
- return io;
- }
- struct efct_hw_io *
- efct_hw_io_alloc(struct efct_hw *hw)
- {
- struct efct_hw_io *io = NULL;
- unsigned long flags = 0;
- spin_lock_irqsave(&hw->io_lock, flags);
- io = _efct_hw_io_alloc(hw);
- spin_unlock_irqrestore(&hw->io_lock, flags);
- return io;
- }
- static void
- efct_hw_io_free_move_correct_list(struct efct_hw *hw,
- struct efct_hw_io *io)
- {
- /*
- * When an IO is freed, depending on the exchange busy flag,
- * move it to the correct list.
- */
- if (io->xbusy) {
- /*
- * add to wait_free list and wait for XRI_ABORTED CQEs to clean
- * up
- */
- INIT_LIST_HEAD(&io->list_entry);
- list_add_tail(&io->list_entry, &hw->io_wait_free);
- io->state = EFCT_HW_IO_STATE_WAIT_FREE;
- } else {
- /* IO not busy, add to free list */
- INIT_LIST_HEAD(&io->list_entry);
- list_add_tail(&io->list_entry, &hw->io_free);
- io->state = EFCT_HW_IO_STATE_FREE;
- }
- }
- static inline void
- efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io)
- {
- /* initialize IO fields */
- efct_hw_init_free_io(io);
- /* Restore default SGL */
- efct_hw_io_restore_sgl(hw, io);
- }
- void
- efct_hw_io_free_internal(struct kref *arg)
- {
- unsigned long flags = 0;
- struct efct_hw_io *io = container_of(arg, struct efct_hw_io, ref);
- struct efct_hw *hw = io->hw;
- /* perform common cleanup */
- efct_hw_io_free_common(hw, io);
- spin_lock_irqsave(&hw->io_lock, flags);
- /* remove from in-use list */
- if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) {
- list_del_init(&io->list_entry);
- efct_hw_io_free_move_correct_list(hw, io);
- }
- spin_unlock_irqrestore(&hw->io_lock, flags);
- }
- int
- efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io)
- {
- return kref_put(&io->ref, io->release);
- }
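- /*
- * Map an XRI to its HW IO object; hw->io[] is indexed by the XRI offset
- * from the base of the XRI resource extent.
- */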
- struct efct_hw_io *
- efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
- {
- u32 ioindex;
- ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0];
- return hw->io[ioindex];
- }
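- /*
- * Reset an IO's SGL to the default and pre-populate the type-specific
- * leading entries: an XFER_RDY SGE for target writes plus any SKIP SGEs
- * the corresponding FCP WQE expects.
- */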
- int
- efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io,
- enum efct_hw_io_type type)
- {
- struct sli4_sge *data = NULL;
- u32 i = 0;
- u32 skips = 0;
- u32 sge_flags = 0;
- if (!io) {
- efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io);
- return -EIO;
- }
- /* Clear / reset the scatter-gather list */
- io->sgl = &io->def_sgl;
- io->sgl_count = io->def_sgl_count;
- io->first_data_sge = 0;
- memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
- io->n_sge = 0;
- io->sge_offset = 0;
- io->type = type;
- data = io->sgl->virt;
- /*
- * Some IO types have underlying hardware requirements on the order
- * of SGEs. Process all special entries here.
- */
- switch (type) {
- case EFCT_HW_IO_TARGET_WRITE:
- /* populate host resident XFER_RDY buffer */
- sge_flags = le32_to_cpu(data->dw2_flags);
- sge_flags &= (~SLI4_SGE_TYPE_MASK);
- sge_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
- data->buffer_address_high =
- cpu_to_le32(upper_32_bits(io->xfer_rdy.phys));
- data->buffer_address_low =
- cpu_to_le32(lower_32_bits(io->xfer_rdy.phys));
- data->buffer_length = cpu_to_le32(io->xfer_rdy.size);
- data->dw2_flags = cpu_to_le32(sge_flags);
- data++;
- skips = EFCT_TARGET_WRITE_SKIPS;
- io->n_sge = 1;
- break;
- case EFCT_HW_IO_TARGET_READ:
- /*
- * For FCP_TSEND64, the first 2 entries are SKIP SGE's
- */
- skips = EFCT_TARGET_READ_SKIPS;
- break;
- case EFCT_HW_IO_TARGET_RSP:
- /*
- * No skips, etc. for FCP_TRSP64
- */
- break;
- default:
- efc_log_err(hw->os, "unsupported IO type %#x\n", type);
- return -EIO;
- }
- /*
- * Write skip entries
- */
- for (i = 0; i < skips; i++) {
- sge_flags = le32_to_cpu(data->dw2_flags);
- sge_flags &= (~SLI4_SGE_TYPE_MASK);
- sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
- data->dw2_flags = cpu_to_le32(sge_flags);
- data++;
- }
- io->n_sge += skips;
- /*
- * Set last
- */
- sge_flags = le32_to_cpu(data->dw2_flags);
- sge_flags |= SLI4_SGE_LAST;
- data->dw2_flags = cpu_to_le32(sge_flags);
- return 0;
- }
- int
- efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
- uintptr_t addr, u32 length)
- {
- struct sli4_sge *data = NULL;
- u32 sge_flags = 0;
- if (!io || !addr || !length) {
- efc_log_err(hw->os,
- "bad parameter hw=%p io=%p addr=%lx length=%u\n",
- hw, io, addr, length);
- return -EIO;
- }
- if (length > hw->sli.sge_supported_length) {
- efc_log_err(hw->os,
- "length of SGE %d bigger than allowed %d\n",
- length, hw->sli.sge_supported_length);
- return -EIO;
- }
- data = io->sgl->virt;
- data += io->n_sge;
- sge_flags = le32_to_cpu(data->dw2_flags);
- sge_flags &= ~SLI4_SGE_TYPE_MASK;
- sge_flags |= SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT;
- sge_flags &= ~SLI4_SGE_DATA_OFFSET_MASK;
- sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset;
- data->buffer_address_high = cpu_to_le32(upper_32_bits(addr));
- data->buffer_address_low = cpu_to_le32(lower_32_bits(addr));
- data->buffer_length = cpu_to_le32(length);
- /*
- * Always assume this is the last entry and mark as such.
- * If this is not the first entry unset the "last SGE"
- * indication for the previous entry
- */
- sge_flags |= SLI4_SGE_LAST;
- data->dw2_flags = cpu_to_le32(sge_flags);
- if (io->n_sge) {
- sge_flags = le32_to_cpu(data[-1].dw2_flags);
- sge_flags &= ~SLI4_SGE_LAST;
- data[-1].dw2_flags = cpu_to_le32(sge_flags);
- }
- /* Set first_data_sge if not previously set */
- if (io->first_data_sge == 0)
- io->first_data_sge = io->n_sge;
- io->sge_offset += length;
- io->n_sge++;
- return 0;
- }
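- /*
- * Illustrative sketch (not part of the original source): how a caller
- * might combine efct_hw_io_init_sges() and efct_hw_io_add_sge() to
- * build the SGL for a target read. The helper name, buffer array and
- * count are hypothetical.
- */
- static int
- efct_hw_example_build_sgl(struct efct_hw *hw, struct efct_hw_io *io,
-                           struct efc_dma *bufs, u32 nbufs)
- {
-         u32 i;
-         int rc;
-         /* Reset the SGL and lay down any type-specific SKIP entries */
-         rc = efct_hw_io_init_sges(hw, io, EFCT_HW_IO_TARGET_READ);
-         if (rc)
-                 return rc;
-         /* Append one DATA SGE per buffer; the last one added stays marked LAST */
-         for (i = 0; i < nbufs; i++) {
-                 rc = efct_hw_io_add_sge(hw, io, bufs[i].phys, bufs[i].size);
-                 if (rc)
-                         return rc;
-         }
-         return 0;
- }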
- void
- efct_hw_io_abort_all(struct efct_hw *hw)
- {
- struct efct_hw_io *io_to_abort = NULL;
- struct efct_hw_io *next_io = NULL;
- list_for_each_entry_safe(io_to_abort, next_io,
- &hw->io_inuse, list_entry) {
- efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL);
- }
- }
- static void
- efct_hw_wq_process_abort(void *arg, u8 *cqe, int status)
- {
- struct efct_hw_io *io = arg;
- struct efct_hw *hw = io->hw;
- u32 ext = 0;
- u32 len = 0;
- struct hw_wq_callback *wqcb;
- /*
- * For IOs that were aborted internally, we may need to issue the
- * callback here depending on whether an XRI_ABORTED CQE is expected or
- * not. If the status is Local Reject/No XRI, then
- * issue the callback now.
- */
- ext = sli_fc_ext_status(&hw->sli, cqe);
- if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
- ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) {
- efct_hw_done_t done = io->done;
- io->done = NULL;
- /*
- * Use latched status as this is always saved for an internal
- * abort. Note: we won't have both a done and an abort_done
- * function, so don't worry about clobbering the len, status
- * and ext fields.
- */
- status = io->saved_status;
- len = io->saved_len;
- ext = io->saved_ext;
- io->status_saved = false;
- done(io, len, status, ext, io->arg);
- }
- if (io->abort_done) {
- efct_hw_done_t done = io->abort_done;
- io->abort_done = NULL;
- done(io, len, status, ext, io->abort_arg);
- }
- /* clear abort bit to indicate abort is complete */
- io->abort_in_progress = false;
- /* Free the WQ callback */
- if (io->abort_reqtag == U32_MAX) {
- efc_log_err(hw->os, "HW IO already freed\n");
- return;
- }
- wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag);
- efct_hw_reqtag_free(hw, wqcb);
- /*
- * Call efct_hw_io_free() because this releases the WQ reservation as
- * well as doing the refcount put. Don't duplicate the code here.
- */
- (void)efct_hw_io_free(hw, io);
- }
- static void
- efct_hw_fill_abort_wqe(struct efct_hw *hw, struct efct_hw_wqe *wqe)
- {
- struct sli4_abort_wqe *abort = (void *)wqe->wqebuf;
- memset(abort, 0, hw->sli.wqe_size);
- abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
- abort->ia_ir_byte |= wqe->send_abts ? 0 : 1;
- /* Suppress ABTS retries */
- abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
- abort->t_tag = cpu_to_le32(wqe->id);
- abort->command = SLI4_WQE_ABORT;
- abort->request_tag = cpu_to_le16(wqe->abort_reqtag);
- abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
- abort->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
- }
- int
- efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
- bool send_abts, void *cb, void *arg)
- {
- struct hw_wq_callback *wqcb;
- unsigned long flags = 0;
- if (!io_to_abort) {
- efc_log_err(hw->os, "bad parameter hw=%p io=%p\n",
- hw, io_to_abort);
- return -EIO;
- }
- if (hw->state != EFCT_HW_STATE_ACTIVE) {
- efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
- hw->state);
- return -EIO;
- }
- /* take a reference on IO being aborted */
- if (kref_get_unless_zero(&io_to_abort->ref) == 0) {
- /* command no longer active */
- efc_log_debug(hw->os,
- "io not active xri=0x%x tag=0x%x\n",
- io_to_abort->indicator, io_to_abort->reqtag);
- return -ENOENT;
- }
- /* Must have a valid WQ reference */
- if (!io_to_abort->wq) {
- efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
- io_to_abort->indicator);
- /* efct_ref_get(): same function */
- kref_put(&io_to_abort->ref, io_to_abort->release);
- return -ENOENT;
- }
- /*
- * Validation checks complete; now check to see if already being
- * aborted, if not set the flag.
- */
- if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) {
- /* efct_ref_get(): same function */
- kref_put(&io_to_abort->ref, io_to_abort->release);
- efc_log_debug(hw->os,
- "io already being aborted xri=0x%x tag=0x%x\n",
- io_to_abort->indicator, io_to_abort->reqtag);
- return -EINPROGRESS;
- }
- /*
- * If we got here, the possibilities are:
- * - host owned xri
- * - io_to_abort->wq_index != U32_MAX
- * - submit ABORT_WQE to same WQ
- * - port owned xri:
- * - rxri: io_to_abort->wq_index == U32_MAX
- * - submit ABORT_WQE to any WQ
- * - non-rxri
- * - io_to_abort->index != U32_MAX
- * - submit ABORT_WQE to same WQ
- * - io_to_abort->index == U32_MAX
- * - submit ABORT_WQE to any WQ
- */
- io_to_abort->abort_done = cb;
- io_to_abort->abort_arg = arg;
- /* Allocate a request tag for the abort portion of this IO */
- wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort);
- if (!wqcb) {
- efc_log_err(hw->os, "can't allocate request tag\n");
- return -ENOSPC;
- }
- io_to_abort->abort_reqtag = wqcb->instance_index;
- io_to_abort->wqe.send_abts = send_abts;
- io_to_abort->wqe.id = io_to_abort->indicator;
- io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
- /*
- * If the wqe is on the pending list, then set this wqe to be
- * aborted when the IO's wqe is removed from the list.
- */
- if (io_to_abort->wq) {
- spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags);
- if (io_to_abort->wqe.list_entry.next) {
- io_to_abort->wqe.abort_wqe_submit_needed = true;
- spin_unlock_irqrestore(&io_to_abort->wq->queue->lock,
- flags);
- return 0;
- }
- spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags);
- }
- efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe);
- /* ABORT_WQE does not actually utilize an XRI on the Port,
- * therefore, keep xbusy as-is to track the exchange's state,
- * not the ABORT_WQE's state
- */
- if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) {
- io_to_abort->abort_in_progress = false;
- /* efct_ref_get(): same function */
- kref_put(&io_to_abort->ref, io_to_abort->release);
- return -EIO;
- }
- return 0;
- }
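- /*
- * Illustrative sketch (not part of the original source): aborting an
- * outstanding HW IO with a completion callback. The function names are
- * hypothetical; the callback signature follows the efct_hw_done_t usage
- * seen above (io, length, status, ext_status, arg) with an int return.
- */
- static int
- efct_hw_example_abort_done(struct efct_hw_io *io, u32 len, int status,
-                            u32 ext, void *arg)
- {
-         /* the aborted IO's original done callback still runs separately */
-         return 0;
- }
- static int
- efct_hw_example_abort(struct efct_hw *hw, struct efct_hw_io *io)
- {
-         /* send_abts=true asks the port to send an ABTS to the remote end */
-         return efct_hw_io_abort(hw, io, true, efct_hw_example_abort_done, NULL);
- }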
- void
- efct_hw_reqtag_pool_free(struct efct_hw *hw)
- {
- u32 i;
- struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
- struct hw_wq_callback *wqcb = NULL;
- if (reqtag_pool) {
- for (i = 0; i < U16_MAX; i++) {
- wqcb = reqtag_pool->tags[i];
- if (!wqcb)
- continue;
- kfree(wqcb);
- }
- kfree(reqtag_pool);
- hw->wq_reqtag_pool = NULL;
- }
- }
- struct reqtag_pool *
- efct_hw_reqtag_pool_alloc(struct efct_hw *hw)
- {
- u32 i = 0;
- struct reqtag_pool *reqtag_pool;
- struct hw_wq_callback *wqcb;
- reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL);
- if (!reqtag_pool)
- return NULL;
- INIT_LIST_HEAD(&reqtag_pool->freelist);
- /* initialize reqtag pool lock */
- spin_lock_init(&reqtag_pool->lock);
- for (i = 0; i < U16_MAX; i++) {
- wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL);
- if (!wqcb)
- break;
- reqtag_pool->tags[i] = wqcb;
- wqcb->instance_index = i;
- wqcb->callback = NULL;
- wqcb->arg = NULL;
- INIT_LIST_HEAD(&wqcb->list_entry);
- list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist);
- }
- return reqtag_pool;
- }
- struct hw_wq_callback *
- efct_hw_reqtag_alloc(struct efct_hw *hw,
- void (*callback)(void *arg, u8 *cqe, int status),
- void *arg)
- {
- struct hw_wq_callback *wqcb = NULL;
- struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
- unsigned long flags = 0;
- if (!callback)
- return wqcb;
- spin_lock_irqsave(&reqtag_pool->lock, flags);
- if (!list_empty(&reqtag_pool->freelist)) {
- wqcb = list_first_entry(&reqtag_pool->freelist,
- struct hw_wq_callback, list_entry);
- }
- if (wqcb) {
- list_del_init(&wqcb->list_entry);
- spin_unlock_irqrestore(&reqtag_pool->lock, flags);
- wqcb->callback = callback;
- wqcb->arg = arg;
- } else {
- spin_unlock_irqrestore(&reqtag_pool->lock, flags);
- }
- return wqcb;
- }
- void
- efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb)
- {
- unsigned long flags = 0;
- struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
- if (!wqcb->callback)
- efc_log_err(hw->os, "WQCB is already freed\n");
- spin_lock_irqsave(&reqtag_pool->lock, flags);
- wqcb->callback = NULL;
- wqcb->arg = NULL;
- INIT_LIST_HEAD(&wqcb->list_entry);
- list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist);
- spin_unlock_irqrestore(&reqtag_pool->lock, flags);
- }
- struct hw_wq_callback *
- efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index)
- {
- struct hw_wq_callback *wqcb;
- wqcb = hw->wq_reqtag_pool->tags[instance_index];
- if (!wqcb)
- efc_log_err(hw->os, "wqcb for instance %d is null\n",
- instance_index);
- return wqcb;
- }
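- /*
- * Illustrative sketch (not part of the original source): the typical
- * request tag life cycle. A tag is taken from the pool together with a
- * CQE callback, its instance_index is carried in the WQE, and once the
- * matching completion has run the callback the tag is returned to the
- * pool. The function names and ctx argument are hypothetical.
- */
- static void
- efct_hw_example_cqe_cb(void *arg, u8 *cqe, int status)
- {
-         /* arg is whatever was passed to efct_hw_reqtag_alloc() */
- }
- static int
- efct_hw_example_reqtag_cycle(struct efct_hw *hw, void *ctx)
- {
-         struct hw_wq_callback *wqcb;
-         wqcb = efct_hw_reqtag_alloc(hw, efct_hw_example_cqe_cb, ctx);
-         if (!wqcb)
-                 return -ENOSPC;
-         /* wqcb->instance_index would be placed in the WQE request tag here */
-         /* after efct_hw_wq_process() invokes the callback, free the tag */
-         efct_hw_reqtag_free(hw, wqcb);
-         return 0;
- }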
- int
- efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id)
- {
- int index = -1;
- int i = id & (EFCT_HW_Q_HASH_SIZE - 1);
- /*
- * Since the hash is always bigger than the maximum number of Qs, we
- * never have to worry about an infinite loop. We will always find
- * an unused entry.
- */
- do {
- if (hash[i].in_use && hash[i].id == id)
- index = hash[i].index;
- else
- i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
- } while (index == -1 && hash[i].in_use);
- return index;
- }
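- /*
- * Illustrative sketch (not part of the original source): resolving a
- * completed CQ ID back to its hw_cq slot through the hash, as done by
- * efct_hw_eq_process() below. The helper name is hypothetical.
- */
- static struct hw_cq *
- efct_hw_example_cq_from_id(struct efct_hw *hw, u16 cq_id)
- {
-         int index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
-         return (index >= 0) ? hw->hw_cq[index] : NULL;
- }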
- int
- efct_hw_process(struct efct_hw *hw, u32 vector,
- u32 max_isr_time_msec)
- {
- struct hw_eq *eq;
- /*
- * The caller should disable interrupts if they wish to prevent us
- * from processing during a shutdown. The following states are defined:
- * EFCT_HW_STATE_UNINITIALIZED - No queues allocated
- * EFCT_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
- * queues are cleared.
- * EFCT_HW_STATE_ACTIVE - Chip and queues are operational
- * EFCT_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
- * EFCT_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
- * completions.
- */
- if (hw->state == EFCT_HW_STATE_UNINITIALIZED)
- return 0;
- /* Get pointer to struct hw_eq */
- eq = hw->hw_eq[vector];
- if (!eq)
- return 0;
- eq->use_count++;
- return efct_hw_eq_process(hw, eq, max_isr_time_msec);
- }
- int
- efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
- u32 max_isr_time_msec)
- {
- u8 eqe[sizeof(struct sli4_eqe)] = { 0 };
- u32 tcheck_count;
- u64 tstart;
- u64 telapsed;
- bool done = false;
- tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
- tstart = jiffies_to_msecs(jiffies);
- while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) {
- u16 cq_id = 0;
- int rc;
- rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
- if (unlikely(rc)) {
- if (rc == SLI4_EQE_STATUS_EQ_FULL) {
- u32 i;
- /*
- * Received a sentinel EQE indicating the
- * EQ is full. Process all CQs
- */
- for (i = 0; i < hw->cq_count; i++)
- efct_hw_cq_process(hw, hw->hw_cq[i]);
- continue;
- } else {
- return rc;
- }
- } else {
- int index;
- index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
- if (likely(index >= 0))
- efct_hw_cq_process(hw, hw->hw_cq[index]);
- else
- efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
- }
- if (eq->queue->n_posted > eq->queue->posted_limit)
- sli_queue_arm(&hw->sli, eq->queue, false);
- if (tcheck_count && (--tcheck_count == 0)) {
- tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
- telapsed = jiffies_to_msecs(jiffies) - tstart;
- if (telapsed >= max_isr_time_msec)
- done = true;
- }
- }
- sli_queue_eq_arm(&hw->sli, eq->queue, true);
- return 0;
- }
- static int
- _efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
- {
- int queue_rc;
- /* Every so often, set the wqec bit to generate consumed completions */
- if (wq->wqec_count)
- wq->wqec_count--;
- if (wq->wqec_count == 0) {
- struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf;
- genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC;
- wq->wqec_count = wq->wqec_set_count;
- }
- /* Decrement WQ free count */
- wq->free_count--;
- queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
- return (queue_rc < 0) ? -EIO : 0;
- }
- static void
- hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count)
- {
- struct efct_hw_wqe *wqe;
- unsigned long flags = 0;
- spin_lock_irqsave(&wq->queue->lock, flags);
- /* Update free count with value passed in */
- wq->free_count += update_free_count;
- while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) {
- wqe = list_first_entry(&wq->pending_list,
- struct efct_hw_wqe, list_entry);
- list_del_init(&wqe->list_entry);
- _efct_hw_wq_write(wq, wqe);
- if (wqe->abort_wqe_submit_needed) {
- wqe->abort_wqe_submit_needed = false;
- efct_hw_fill_abort_wqe(wq->hw, wqe);
- INIT_LIST_HEAD(&wqe->list_entry);
- list_add_tail(&wqe->list_entry, &wq->pending_list);
- wq->wq_pending_count++;
- }
- }
- spin_unlock_irqrestore(&wq->queue->lock, flags);
- }
- void
- efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq)
- {
- u8 cqe[sizeof(struct sli4_mcqe)];
- u16 rid = U16_MAX;
- /* completion type */
- enum sli4_qentry ctype;
- u32 n_processed = 0;
- u32 tstart, telapsed;
- tstart = jiffies_to_msecs(jiffies);
- while (!sli_cq_read(&hw->sli, cq->queue, cqe)) {
- int status;
- status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
- /*
- * The sign of status is significant. If status is:
- * == 0 : call completed correctly and
- * the CQE indicated success
- * > 0 : call completed correctly and
- * the CQE indicated an error
- * < 0 : call failed and no information is available about the
- * CQE
- */
- if (status < 0) {
- if (status == SLI4_MCQE_STATUS_NOT_COMPLETED)
- /*
- * Notification that an entry was consumed,
- * but not completed
- */
- continue;
- break;
- }
- switch (ctype) {
- case SLI4_QENTRY_ASYNC:
- sli_cqe_async(&hw->sli, cqe);
- break;
- case SLI4_QENTRY_MQ:
- /*
- * Process MQ entry. Note there is no way to determine
- * the MQ_ID from the completion entry.
- */
- efct_hw_mq_process(hw, status, hw->mq);
- break;
- case SLI4_QENTRY_WQ:
- efct_hw_wq_process(hw, cq, cqe, status, rid);
- break;
- case SLI4_QENTRY_WQ_RELEASE: {
- u32 wq_id = rid;
- int index;
- struct hw_wq *wq = NULL;
- index = efct_hw_queue_hash_find(hw->wq_hash, wq_id);
- if (likely(index >= 0)) {
- wq = hw->hw_wq[index];
- } else {
- efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id);
- break;
- }
- /* Submit any HW IOs that are on the WQ pending list */
- hw_wq_submit_pending(wq, wq->wqec_set_count);
- break;
- }
- case SLI4_QENTRY_RQ:
- efct_hw_rqpair_process_rq(hw, cq, cqe);
- break;
- case SLI4_QENTRY_XABT: {
- efct_hw_xabt_process(hw, cq, cqe, rid);
- break;
- }
- default:
- efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n",
- ctype, rid);
- break;
- }
- n_processed++;
- if (n_processed == cq->queue->proc_limit)
- break;
- if (cq->queue->n_posted >= cq->queue->posted_limit)
- sli_queue_arm(&hw->sli, cq->queue, false);
- }
- sli_queue_arm(&hw->sli, cq->queue, true);
- if (n_processed > cq->queue->max_num_processed)
- cq->queue->max_num_processed = n_processed;
- telapsed = jiffies_to_msecs(jiffies) - tstart;
- if (telapsed > cq->queue->max_process_time)
- cq->queue->max_process_time = telapsed;
- }
- void
- efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
- u8 *cqe, int status, u16 rid)
- {
- struct hw_wq_callback *wqcb;
- if (rid == EFCT_HW_REQUE_XRI_REGTAG) {
- if (status)
- efc_log_err(hw->os, "reque xri failed, status = %d\n",
- status);
- return;
- }
- wqcb = efct_hw_reqtag_get_instance(hw, rid);
- if (!wqcb) {
- efc_log_err(hw->os, "invalid request tag: x%x\n", rid);
- return;
- }
- if (!wqcb->callback) {
- efc_log_err(hw->os, "wqcb callback is NULL\n");
- return;
- }
- (*wqcb->callback)(wqcb->arg, cqe, status);
- }
- void
- efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
- u8 *cqe, u16 rid)
- {
- /* search IOs wait free list */
- struct efct_hw_io *io = NULL;
- unsigned long flags = 0;
- io = efct_hw_io_lookup(hw, rid);
- if (!io) {
- /* IO lookup failure should never happen */
- efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid);
- return;
- }
- if (!io->xbusy)
- efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
- else
- /* mark IO as no longer busy */
- io->xbusy = false;
- /*
- * For IOs that were aborted internally, we need to issue any pending
- * callback here.
- */
- if (io->done) {
- efct_hw_done_t done = io->done;
- void *arg = io->arg;
- /*
- * Use latched status as this is always saved for an internal
- * abort
- */
- int status = io->saved_status;
- u32 len = io->saved_len;
- u32 ext = io->saved_ext;
- io->done = NULL;
- io->status_saved = false;
- done(io, len, status, ext, arg);
- }
- spin_lock_irqsave(&hw->io_lock, flags);
- if (io->state == EFCT_HW_IO_STATE_INUSE ||
- io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
- /* if on wait_free list, caller has already freed IO;
- * remove from wait_free list and add to free list.
- * if on in-use list, already marked as no longer busy;
- * just leave there and wait for caller to free.
- */
- if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
- io->state = EFCT_HW_IO_STATE_FREE;
- list_del_init(&io->list_entry);
- efct_hw_io_free_move_correct_list(hw, io);
- }
- }
- spin_unlock_irqrestore(&hw->io_lock, flags);
- }
- static int
- efct_hw_flush(struct efct_hw *hw)
- {
- u32 i = 0;
- /* Process any remaining completions */
- for (i = 0; i < hw->eq_count; i++)
- efct_hw_process(hw, i, ~0);
- return 0;
- }
- int
- efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
- {
- int rc = 0;
- unsigned long flags = 0;
- spin_lock_irqsave(&wq->queue->lock, flags);
- if (list_empty(&wq->pending_list)) {
- if (wq->free_count > 0) {
- rc = _efct_hw_wq_write(wq, wqe);
- } else {
- INIT_LIST_HEAD(&wqe->list_entry);
- list_add_tail(&wqe->list_entry, &wq->pending_list);
- wq->wq_pending_count++;
- }
- spin_unlock_irqrestore(&wq->queue->lock, flags);
- return rc;
- }
- INIT_LIST_HEAD(&wqe->list_entry);
- list_add_tail(&wqe->list_entry, &wq->pending_list);
- wq->wq_pending_count++;
- while (wq->free_count > 0) {
- wqe = list_first_entry(&wq->pending_list, struct efct_hw_wqe,
- list_entry);
- if (!wqe)
- break;
- list_del_init(&wqe->list_entry);
- rc = _efct_hw_wq_write(wq, wqe);
- if (rc)
- break;
- if (wqe->abort_wqe_submit_needed) {
- wqe->abort_wqe_submit_needed = false;
- efct_hw_fill_abort_wqe(wq->hw, wqe);
- INIT_LIST_HEAD(&wqe->list_entry);
- list_add_tail(&wqe->list_entry, &wq->pending_list);
- wq->wq_pending_count++;
- }
- }
- spin_unlock_irqrestore(&wq->queue->lock, flags);
- return rc;
- }
- int
- efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls)
- {
- struct efct *efct = efc->base;
- return efct_hw_bls_send(efct, type, bls, NULL, NULL);
- }
- int
- efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
- void *cb, void *arg)
- {
- struct efct_hw *hw = &efct->hw;
- struct efct_hw_io *hio;
- struct sli_bls_payload bls;
- int rc;
- if (hw->state != EFCT_HW_STATE_ACTIVE) {
- efc_log_err(hw->os,
- "cannot send BLS, HW state=%d\n", hw->state);
- return -EIO;
- }
- hio = efct_hw_io_alloc(hw);
- if (!hio) {
- efc_log_err(hw->os, "HIO allocation failed\n");
- return -EIO;
- }
- hio->done = cb;
- hio->arg = arg;
- bls_params->xri = hio->indicator;
- bls_params->tag = hio->reqtag;
- if (type == FC_RCTL_BA_ACC) {
- hio->type = EFCT_HW_BLS_ACC;
- bls.type = SLI4_SLI_BLS_ACC;
- memcpy(&bls.u.acc, bls_params->payload, sizeof(bls.u.acc));
- } else {
- hio->type = EFCT_HW_BLS_RJT;
- bls.type = SLI4_SLI_BLS_RJT;
- memcpy(&bls.u.rjt, bls_params->payload, sizeof(bls.u.rjt));
- }
- bls.ox_id = cpu_to_le16(bls_params->ox_id);
- bls.rx_id = cpu_to_le16(bls_params->rx_id);
- if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf,
- &bls, bls_params)) {
- efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
- return -EIO;
- }
- hio->xbusy = true;
- /*
- * Add IO to active io wqe list before submitting, in case the
- * wcqe processing preempts this thread.
- */
- hio->wq->use_count++;
- rc = efct_hw_wq_write(hio->wq, &hio->wqe);
- if (rc >= 0) {
- /* non-negative return is success */
- rc = 0;
- } else {
- /* failed to write wqe, remove from active wqe list */
- efc_log_err(hw->os,
- "sli_queue_write failed: %d\n", rc);
- hio->xbusy = false;
- }
- return rc;
- }
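- /*
- * Illustrative sketch (not part of the original source): sending a
- * BA_ACC with a completion callback instead of the NULL cb used by
- * efct_efc_bls_send() above. The function names are hypothetical; the
- * callback signature follows the efct_hw_done_t usage seen earlier.
- */
- static int
- efct_hw_example_bls_done(struct efct_hw_io *hio, u32 len, int status,
-                          u32 ext, void *arg)
- {
-         /* status/ext report the XMIT_BLS_RSP64 completion */
-         return 0;
- }
- static int
- efct_hw_example_send_ba_acc(struct efct *efct, struct sli_bls_params *bls)
- {
-         /* caller fills bls->payload, ox_id and rx_id from the received ABTS */
-         return efct_hw_bls_send(efct, FC_RCTL_BA_ACC, bls,
-                                 efct_hw_example_bls_done, NULL);
- }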
- static int
- efct_els_ssrs_send_cb(struct efct_hw_io *hio, u32 length, int status,
- u32 ext_status, void *arg)
- {
- struct efc_disc_io *io = arg;
- efc_disc_io_complete(io, length, status, ext_status);
- return 0;
- }
- static inline void
- efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params)
- {
- u8 *cmd = io->req.virt;
- params->cmd = *cmd;
- params->s_id = io->s_id;
- params->d_id = io->d_id;
- params->ox_id = io->iparam.els.ox_id;
- params->rpi = io->rpi;
- params->vpi = io->vpi;
- params->rpi_registered = io->rpi_registered;
- params->xmit_len = io->xmit_len;
- params->rsp_len = io->rsp_len;
- params->timeout = io->iparam.els.timeout;
- }
- static inline void
- efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params)
- {
- params->r_ctl = io->iparam.ct.r_ctl;
- params->type = io->iparam.ct.type;
- params->df_ctl = io->iparam.ct.df_ctl;
- params->d_id = io->d_id;
- params->ox_id = io->iparam.ct.ox_id;
- params->rpi = io->rpi;
- params->vpi = io->vpi;
- params->rpi_registered = io->rpi_registered;
- params->xmit_len = io->xmit_len;
- params->rsp_len = io->rsp_len;
- params->timeout = io->iparam.ct.timeout;
- }
- /**
- * efct_els_hw_srrs_send() - Send a single request and response cmd.
- * @efc: efc library structure
- * @io: Discovery IO used to hold els and ct cmd context.
- *
- * This routine supports communication sequences consisting of a single
- * request and single response between two endpoints. Examples include:
- * - Sending an ELS request.
- * - Sending an ELS response - To send an ELS response, the caller must provide
- * the OX_ID from the received request.
- * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
- * the caller must provide the R_CTL, TYPE, and DF_CTL
- * values to place in the FC frame header.
- *
- * Return: Status of the request.
- */
- int
- efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io)
- {
- struct efct *efct = efc->base;
- struct efct_hw_io *hio;
- struct efct_hw *hw = &efct->hw;
- struct efc_dma *send = &io->req;
- struct efc_dma *receive = &io->rsp;
- struct sli4_sge *sge = NULL;
- int rc = 0;
- u32 len = io->xmit_len;
- u32 sge0_flags;
- u32 sge1_flags;
- hio = efct_hw_io_alloc(hw);
- if (!hio) {
- pr_err("HIO alloc failed\n");
- return -EIO;
- }
- if (hw->state != EFCT_HW_STATE_ACTIVE) {
- efc_log_debug(hw->os,
- "cannot send SRRS, HW state=%d\n", hw->state);
- efct_hw_io_free(hw, hio);
- return -EIO;
- }
- hio->done = efct_els_ssrs_send_cb;
- hio->arg = io;
- sge = hio->sgl->virt;
- /* clear both SGE */
- memset(hio->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
- sge0_flags = le32_to_cpu(sge[0].dw2_flags);
- sge1_flags = le32_to_cpu(sge[1].dw2_flags);
- if (send->size) {
- sge[0].buffer_address_high =
- cpu_to_le32(upper_32_bits(send->phys));
- sge[0].buffer_address_low =
- cpu_to_le32(lower_32_bits(send->phys));
- sge0_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
- sge[0].buffer_length = cpu_to_le32(len);
- }
- if (io->io_type == EFC_DISC_IO_ELS_REQ ||
- io->io_type == EFC_DISC_IO_CT_REQ) {
- sge[1].buffer_address_high =
- cpu_to_le32(upper_32_bits(receive->phys));
- sge[1].buffer_address_low =
- cpu_to_le32(lower_32_bits(receive->phys));
- sge1_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
- sge1_flags |= SLI4_SGE_LAST;
- sge[1].buffer_length = cpu_to_le32(receive->size);
- } else {
- sge0_flags |= SLI4_SGE_LAST;
- }
- sge[0].dw2_flags = cpu_to_le32(sge0_flags);
- sge[1].dw2_flags = cpu_to_le32(sge1_flags);
- switch (io->io_type) {
- case EFC_DISC_IO_ELS_REQ: {
- struct sli_els_params els_params;
- hio->type = EFCT_HW_ELS_REQ;
- efct_fill_els_params(io, &els_params);
- els_params.xri = hio->indicator;
- els_params.tag = hio->reqtag;
- if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
- &els_params)) {
- efc_log_err(hw->os, "REQ WQE error\n");
- rc = -EIO;
- }
- break;
- }
- case EFC_DISC_IO_ELS_RESP: {
- struct sli_els_params els_params;
- hio->type = EFCT_HW_ELS_RSP;
- efct_fill_els_params(io, &els_params);
- els_params.xri = hio->indicator;
- els_params.tag = hio->reqtag;
- if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send,
- &els_params)){
- efc_log_err(hw->os, "RSP WQE error\n");
- rc = -EIO;
- }
- break;
- }
- case EFC_DISC_IO_CT_REQ: {
- struct sli_ct_params ct_params;
- hio->type = EFCT_HW_FC_CT;
- efct_fill_ct_params(io, &ct_params);
- ct_params.xri = hio->indicator;
- ct_params.tag = hio->reqtag;
- if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
- &ct_params)){
- efc_log_err(hw->os, "GEN WQE error\n");
- rc = -EIO;
- }
- break;
- }
- case EFC_DISC_IO_CT_RESP: {
- struct sli_ct_params ct_params;
- hio->type = EFCT_HW_FC_CT_RSP;
- efct_fill_ct_params(io, &ct_params);
- ct_params.xri = hio->indicator;
- ct_params.tag = hio->reqtag;
- if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
- &ct_params)){
- efc_log_err(hw->os, "XMIT SEQ WQE error\n");
- rc = -EIO;
- }
- break;
- }
- default:
- efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type);
- rc = -EIO;
- }
- if (rc == 0) {
- hio->xbusy = true;
- /*
- * Add IO to active io wqe list before submitting, in case the
- * wcqe processing preempts this thread.
- */
- hio->wq->use_count++;
- rc = efct_hw_wq_write(hio->wq, &hio->wqe);
- if (rc >= 0) {
- /* non-negative return is success */
- rc = 0;
- } else {
- /* failed to write wqe, remove from active wqe list */
- efc_log_err(hw->os,
- "sli_queue_write failed: %d\n", rc);
- hio->xbusy = false;
- }
- }
- return rc;
- }
- int
- efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
- struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
- void *cb, void *arg)
- {
- int rc = 0;
- bool send_wqe = true;
- if (!io) {
- pr_err("bad parm hw=%p io=%p\n", hw, io);
- return -EIO;
- }
- if (hw->state != EFCT_HW_STATE_ACTIVE) {
- efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
- return -EIO;
- }
- /*
- * Save state needed during later stages
- */
- io->type = type;
- io->done = cb;
- io->arg = arg;
- /*
- * Format the work queue entry used to send the IO
- */
- switch (type) {
- case EFCT_HW_IO_TARGET_WRITE: {
- u16 *flags = &iparam->fcp_tgt.flags;
- struct fcp_txrdy *xfer = io->xfer_rdy.virt;
- /*
- * Fill in the XFER_RDY for IF_TYPE 0 devices
- */
- xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset);
- xfer->ft_burst_len = cpu_to_be32(iparam->fcp_tgt.xmit_len);
- if (io->xbusy)
- *flags |= SLI4_IO_CONTINUATION;
- else
- *flags &= ~SLI4_IO_CONTINUATION;
- iparam->fcp_tgt.xri = io->indicator;
- iparam->fcp_tgt.tag = io->reqtag;
- if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf,
- &io->def_sgl, io->first_data_sge,
- SLI4_CQ_DEFAULT,
- 0, 0, &iparam->fcp_tgt)) {
- efc_log_err(hw->os, "TRECEIVE WQE error\n");
- rc = -EIO;
- }
- break;
- }
- case EFCT_HW_IO_TARGET_READ: {
- u16 *flags = &iparam->fcp_tgt.flags;
- if (io->xbusy)
- *flags |= SLI4_IO_CONTINUATION;
- else
- *flags &= ~SLI4_IO_CONTINUATION;
- iparam->fcp_tgt.xri = io->indicator;
- iparam->fcp_tgt.tag = io->reqtag;
- if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf,
- &io->def_sgl, io->first_data_sge,
- SLI4_CQ_DEFAULT,
- 0, 0, &iparam->fcp_tgt)) {
- efc_log_err(hw->os, "TSEND WQE error\n");
- rc = -EIO;
- }
- break;
- }
- case EFCT_HW_IO_TARGET_RSP: {
- u16 *flags = &iparam->fcp_tgt.flags;
- if (io->xbusy)
- *flags |= SLI4_IO_CONTINUATION;
- else
- *flags &= ~SLI4_IO_CONTINUATION;
- iparam->fcp_tgt.xri = io->indicator;
- iparam->fcp_tgt.tag = io->reqtag;
- if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf,
- &io->def_sgl, SLI4_CQ_DEFAULT,
- 0, &iparam->fcp_tgt)) {
- efc_log_err(hw->os, "TRSP WQE error\n");
- rc = -EIO;
- }
- break;
- }
- default:
- efc_log_err(hw->os, "unsupported IO type %#x\n", type);
- rc = -EIO;
- }
- if (send_wqe && rc == 0) {
- io->xbusy = true;
- /*
- * Add IO to active io wqe list before submitting, in case the
- * wcqe processing preempts this thread.
- */
- hw->tcmd_wq_submit[io->wq->instance]++;
- io->wq->use_count++;
- rc = efct_hw_wq_write(io->wq, &io->wqe);
- if (rc >= 0) {
- /* non-negative return is success */
- rc = 0;
- } else {
- /* failed to write wqe, remove from active wqe list */
- efc_log_err(hw->os,
- "sli_queue_write failed: %d\n", rc);
- io->xbusy = false;
- }
- }
- return rc;
- }
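- /*
- * Illustrative sketch (not part of the original source): sending the FCP
- * response phase of a target IO. The function names are hypothetical;
- * the caller is assumed to have filled iparam->fcp_tgt, and the callback
- * signature follows the efct_hw_done_t usage seen earlier.
- */
- static int
- efct_hw_example_rsp_done(struct efct_hw_io *io, u32 len, int status,
-                          u32 ext, void *arg)
- {
-         return 0;
- }
- static int
- efct_hw_example_send_rsp(struct efct_hw *hw, struct efct_hw_io *io,
-                          union efct_hw_io_param_u *iparam)
- {
-         return efct_hw_io_send(hw, EFCT_HW_IO_TARGET_RSP, io, iparam,
-                                efct_hw_example_rsp_done, NULL);
- }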
- int
- efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
- u8 sof, u8 eof, struct efc_dma *payload,
- struct efct_hw_send_frame_context *ctx,
- void (*callback)(void *arg, u8 *cqe, int status),
- void *arg)
- {
- int rc;
- struct efct_hw_wqe *wqe;
- u32 xri;
- struct hw_wq *wq;
- wqe = &ctx->wqe;
- /* populate the callback object */
- ctx->hw = hw;
- /* Fetch and populate request tag */
- ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg);
- if (!ctx->wqcb) {
- efc_log_err(hw->os, "can't allocate request tag\n");
- return -ENOSPC;
- }
- wq = hw->hw_wq[0];
- /* Set XRI and RX_ID in the header based on which WQ, and which
- * send_frame_io we are using
- */
- xri = wq->send_frame_io->indicator;
- /* Build the send frame WQE */
- rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf,
- sof, eof, (u32 *)hdr, payload, payload->len,
- EFCT_HW_SEND_FRAME_TIMEOUT, xri,
- ctx->wqcb->instance_index);
- if (rc) {
- efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
- return -EIO;
- }
- /* Write to WQ */
- rc = efct_hw_wq_write(wq, wqe);
- if (rc) {
- efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc);
- return -EIO;
- }
- wq->use_count++;
- return 0;
- }
- static int
- efct_hw_cb_link_stat(struct efct_hw *hw, int status,
- u8 *mqe, void *arg)
- {
- struct sli4_cmd_read_link_stats *mbox_rsp;
- struct efct_hw_link_stat_cb_arg *cb_arg = arg;
- struct efct_hw_link_stat_counts counts[EFCT_HW_LINK_STAT_MAX];
- u32 num_counters, i;
- u32 mbox_rsp_flags = 0;
- mbox_rsp = (struct sli4_cmd_read_link_stats *)mqe;
- mbox_rsp_flags = le32_to_cpu(mbox_rsp->dw1_flags);
- num_counters = (mbox_rsp_flags & SLI4_READ_LNKSTAT_GEC) ? 20 : 13;
- memset(counts, 0, sizeof(struct efct_hw_link_stat_counts) *
- EFCT_HW_LINK_STAT_MAX);
- /* Fill overflow counts, mask starts from SLI4_READ_LNKSTAT_W02OF*/
- for (i = 0; i < EFCT_HW_LINK_STAT_MAX; i++)
- counts[i].overflow = (mbox_rsp_flags & (1 << (i + 2)));
- counts[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter =
- le32_to_cpu(mbox_rsp->linkfail_errcnt);
- counts[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter =
- le32_to_cpu(mbox_rsp->losssync_errcnt);
- counts[EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter =
- le32_to_cpu(mbox_rsp->losssignal_errcnt);
- counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter =
- le32_to_cpu(mbox_rsp->primseq_errcnt);
- counts[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter =
- le32_to_cpu(mbox_rsp->inval_txword_errcnt);
- counts[EFCT_HW_LINK_STAT_CRC_COUNT].counter =
- le32_to_cpu(mbox_rsp->crc_errcnt);
- counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter =
- le32_to_cpu(mbox_rsp->primseq_eventtimeout_cnt);
- counts[EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter =
- le32_to_cpu(mbox_rsp->elastic_bufoverrun_errcnt);
- counts[EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter =
- le32_to_cpu(mbox_rsp->arbit_fc_al_timeout_cnt);
- counts[EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter =
- le32_to_cpu(mbox_rsp->adv_rx_buftor_to_buf_credit);
- counts[EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter =
- le32_to_cpu(mbox_rsp->curr_rx_buf_to_buf_credit);
- counts[EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter =
- le32_to_cpu(mbox_rsp->adv_tx_buf_to_buf_credit);
- counts[EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter =
- le32_to_cpu(mbox_rsp->curr_tx_buf_to_buf_credit);
- counts[EFCT_HW_LINK_STAT_RCV_EOFA_COUNT].counter =
- le32_to_cpu(mbox_rsp->rx_eofa_cnt);
- counts[EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter =
- le32_to_cpu(mbox_rsp->rx_eofdti_cnt);
- counts[EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT].counter =
- le32_to_cpu(mbox_rsp->rx_eofni_cnt);
- counts[EFCT_HW_LINK_STAT_RCV_SOFF_COUNT].counter =
- le32_to_cpu(mbox_rsp->rx_soff_cnt);
- counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter =
- le32_to_cpu(mbox_rsp->rx_dropped_no_aer_cnt);
- counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter =
- le32_to_cpu(mbox_rsp->rx_dropped_no_avail_rpi_rescnt);
- counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter =
- le32_to_cpu(mbox_rsp->rx_dropped_no_avail_xri_rescnt);
- if (cb_arg) {
- if (cb_arg->cb) {
- if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
- status = le16_to_cpu(mbox_rsp->hdr.status);
- cb_arg->cb(status, num_counters, counts, cb_arg->arg);
- }
- kfree(cb_arg);
- }
- return 0;
- }
- int
- efct_hw_get_link_stats(struct efct_hw *hw, u8 req_ext_counters,
- u8 clear_overflow_flags, u8 clear_all_counters,
- void (*cb)(int status, u32 num_counters,
- struct efct_hw_link_stat_counts *counters,
- void *arg),
- void *arg)
- {
- int rc = -EIO;
- struct efct_hw_link_stat_cb_arg *cb_arg;
- u8 mbxdata[SLI4_BMBX_SIZE];
- cb_arg = kzalloc(sizeof(*cb_arg), GFP_ATOMIC);
- if (!cb_arg)
- return -ENOMEM;
- cb_arg->cb = cb;
- cb_arg->arg = arg;
- /* Send the HW command */
- if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, req_ext_counters,
- clear_overflow_flags, clear_all_counters))
- rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
- efct_hw_cb_link_stat, cb_arg);
- if (rc)
- kfree(cb_arg);
- return rc;
- }
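- /*
- * Illustrative sketch (not part of the original source): requesting link
- * statistics and consuming them in the completion callback. The function
- * names are hypothetical; the callback signature mirrors the cb parameter
- * of efct_hw_get_link_stats() above.
- */
- static void
- efct_hw_example_link_stats_cb(int status, u32 num_counters,
-                               struct efct_hw_link_stat_counts *counters,
-                               void *arg)
- {
-         if (status)
-                 return;
-         /* e.g. inspect counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter */
- }
- static int
- efct_hw_example_get_link_stats(struct efct_hw *hw)
- {
-         /* request extended counters, do not clear anything */
-         return efct_hw_get_link_stats(hw, 1, 0, 0,
-                                       efct_hw_example_link_stats_cb, NULL);
- }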
- static int
- efct_hw_cb_host_stat(struct efct_hw *hw, int status, u8 *mqe, void *arg)
- {
- struct sli4_cmd_read_status *mbox_rsp =
- (struct sli4_cmd_read_status *)mqe;
- struct efct_hw_host_stat_cb_arg *cb_arg = arg;
- struct efct_hw_host_stat_counts counts[EFCT_HW_HOST_STAT_MAX];
- u32 num_counters = EFCT_HW_HOST_STAT_MAX;
- memset(counts, 0, sizeof(struct efct_hw_host_stat_counts) *
- EFCT_HW_HOST_STAT_MAX);
- counts[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter =
- le32_to_cpu(mbox_rsp->trans_kbyte_cnt);
- counts[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter =
- le32_to_cpu(mbox_rsp->recv_kbyte_cnt);
- counts[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter =
- le32_to_cpu(mbox_rsp->trans_frame_cnt);
- counts[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter =
- le32_to_cpu(mbox_rsp->recv_frame_cnt);
- counts[EFCT_HW_HOST_STAT_TX_SEQ_COUNT].counter =
- le32_to_cpu(mbox_rsp->trans_seq_cnt);
- counts[EFCT_HW_HOST_STAT_RX_SEQ_COUNT].counter =
- le32_to_cpu(mbox_rsp->recv_seq_cnt);
- counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter =
- le32_to_cpu(mbox_rsp->tot_exchanges_orig);
- counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP].counter =
- le32_to_cpu(mbox_rsp->tot_exchanges_resp);
- counts[EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT].counter =
- le32_to_cpu(mbox_rsp->recv_p_bsy_cnt);
- counts[EFCT_HW_HOST_STAT_RX_F_BSY_COUNT].counter =
- le32_to_cpu(mbox_rsp->recv_f_bsy_cnt);
- counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter =
- le32_to_cpu(mbox_rsp->no_rq_buf_dropped_frames_cnt);
- counts[EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter =
- le32_to_cpu(mbox_rsp->empty_rq_timeout_cnt);
- counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter =
- le32_to_cpu(mbox_rsp->no_xri_dropped_frames_cnt);
- counts[EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter =
- le32_to_cpu(mbox_rsp->empty_xri_pool_cnt);
- if (cb_arg) {
- if (cb_arg->cb) {
- if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
- status = le16_to_cpu(mbox_rsp->hdr.status);
- cb_arg->cb(status, num_counters, counts, cb_arg->arg);
- }
- kfree(cb_arg);
- }
- return 0;
- }
- int
- efct_hw_get_host_stats(struct efct_hw *hw, u8 cc,
- void (*cb)(int status, u32 num_counters,
- struct efct_hw_host_stat_counts *counters,
- void *arg),
- void *arg)
- {
- int rc = -EIO;
- struct efct_hw_host_stat_cb_arg *cb_arg;
- u8 mbxdata[SLI4_BMBX_SIZE];
- cb_arg = kmalloc(sizeof(*cb_arg), GFP_ATOMIC);
- if (!cb_arg)
- return -ENOMEM;
- cb_arg->cb = cb;
- cb_arg->arg = arg;
- /* Send the HW command to get the host stats */
- if (!sli_cmd_read_status(&hw->sli, mbxdata, cc))
- rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
- efct_hw_cb_host_stat, cb_arg);
- if (rc) {
- efc_log_debug(hw->os, "READ_HOST_STATS failed\n");
- kfree(cb_arg);
- }
- return rc;
- }
- struct efct_hw_async_call_ctx {
- efct_hw_async_cb_t callback;
- void *arg;
- u8 cmd[SLI4_BMBX_SIZE];
- };
- static void
- efct_hw_async_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
- {
- struct efct_hw_async_call_ctx *ctx = arg;
- if (ctx) {
- if (ctx->callback)
- (*ctx->callback)(hw, status, mqe, ctx->arg);
- kfree(ctx);
- }
- }
- int
- efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg)
- {
- struct efct_hw_async_call_ctx *ctx;
- int rc;
- /*
- * Allocate a callback context (which includes the mbox cmd buffer),
- * we need this to be persistent as the mbox cmd submission may be
- * queued and executed later.
- */
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
- ctx->callback = callback;
- ctx->arg = arg;
- /* Build and send a NOP mailbox command */
- if (sli_cmd_common_nop(&hw->sli, ctx->cmd, 0)) {
- efc_log_err(hw->os, "COMMON_NOP format failure\n");
- kfree(ctx);
- return -EIO;
- }
- rc = efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb,
- ctx);
- if (rc) {
- efc_log_err(hw->os, "COMMON_NOP command failure, rc=%d\n", rc);
- kfree(ctx);
- return -EIO;
- }
- return 0;
- }
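- /*
- * Illustrative sketch (not part of the original source): scheduling a
- * callback through the mailbox path via the COMMON_NOP issued by
- * efct_hw_async_call() above. The function names are hypothetical, and
- * the callback is assumed to take the efct_hw_async_cb_t arguments shown
- * above (hw, status, mqe, arg) with an int return.
- */
- static int
- efct_hw_example_nop_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
- {
-         /* called from efct_hw_async_cb() once the NOP completes */
-         return 0;
- }
- static int
- efct_hw_example_nop(struct efct_hw *hw)
- {
-         return efct_hw_async_call(hw, efct_hw_example_nop_cb, NULL);
- }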
- static int
- efct_hw_cb_fw_write(struct efct_hw *hw, int status, u8 *mqe, void *arg)
- {
- struct sli4_cmd_sli_config *mbox_rsp =
- (struct sli4_cmd_sli_config *)mqe;
- struct sli4_rsp_cmn_write_object *wr_obj_rsp;
- struct efct_hw_fw_wr_cb_arg *cb_arg = arg;
- u32 bytes_written;
- u16 mbox_status;
- u32 change_status;
- wr_obj_rsp = (struct sli4_rsp_cmn_write_object *)
- &mbox_rsp->payload.embed;
- bytes_written = le32_to_cpu(wr_obj_rsp->actual_write_length);
- mbox_status = le16_to_cpu(mbox_rsp->hdr.status);
- change_status = (le32_to_cpu(wr_obj_rsp->change_status_dword) &
- RSP_CHANGE_STATUS);
- if (cb_arg) {
- if (cb_arg->cb) {
- if (!status && mbox_status)
- status = mbox_status;
- cb_arg->cb(status, bytes_written, change_status,
- cb_arg->arg);
- }
- kfree(cb_arg);
- }
- return 0;
- }
- int
- efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma, u32 size,
- u32 offset, int last,
- void (*cb)(int status, u32 bytes_written,
- u32 change_status, void *arg),
- void *arg)
- {
- int rc = -EIO;
- u8 mbxdata[SLI4_BMBX_SIZE];
- struct efct_hw_fw_wr_cb_arg *cb_arg;
- int noc = 0;
- cb_arg = kzalloc(sizeof(*cb_arg), GFP_KERNEL);
- if (!cb_arg)
- return -ENOMEM;
- cb_arg->cb = cb;
- cb_arg->arg = arg;
- /* Write a portion of a firmware image to the device */
- if (!sli_cmd_common_write_object(&hw->sli, mbxdata,
- noc, last, size, offset, "/prg/",
- dma))
- rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
- efct_hw_cb_fw_write, cb_arg);
- if (rc != 0) {
- efc_log_debug(hw->os, "COMMON_WRITE_OBJECT failed\n");
- kfree(cb_arg);
- }
- return rc;
- }
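- /*
- * Illustrative sketch (not part of the original source): writing one
- * DMA-buffer-sized chunk of a firmware image. The helper, callback and
- * length handling are hypothetical; the growing offset and the "last"
- * flag on the final chunk follow the efct_hw_firmware_write() parameters
- * above.
- */
- static void
- efct_hw_example_fw_cb(int status, u32 bytes_written, u32 change_status,
-                       void *arg)
- {
-         /* a real caller would record bytes_written and wake a waiter */
- }
- static int
- efct_hw_example_fw_write_chunk(struct efct_hw *hw, struct efc_dma *chunk,
-                                u32 chunk_len, u32 offset, u32 image_len)
- {
-         int last = (offset + chunk_len >= image_len);
-         /* chunk_len bytes of the image were already copied to chunk->virt */
-         return efct_hw_firmware_write(hw, chunk, chunk_len, offset, last,
-                                       efct_hw_example_fw_cb, NULL);
- }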
- static int
- efct_hw_cb_port_control(struct efct_hw *hw, int status, u8 *mqe,
- void *arg)
- {
- return 0;
- }
- int
- efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
- uintptr_t value,
- void (*cb)(int status, uintptr_t value, void *arg),
- void *arg)
- {
- int rc = -EIO;
- u8 link[SLI4_BMBX_SIZE];
- u32 speed = 0;
- u8 reset_alpa = 0;
- switch (ctrl) {
- case EFCT_HW_PORT_INIT:
- if (!sli_cmd_config_link(&hw->sli, link))
- rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
- efct_hw_cb_port_control, NULL);
- if (rc != 0) {
- efc_log_err(hw->os, "CONFIG_LINK failed\n");
- break;
- }
- speed = hw->config.speed;
- reset_alpa = (u8)(value & 0xff);
- rc = -EIO;
- if (!sli_cmd_init_link(&hw->sli, link, speed, reset_alpa))
- rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
- efct_hw_cb_port_control, NULL);
- /* Free buffer on error, since no callback is coming */
- if (rc)
- efc_log_err(hw->os, "INIT_LINK failed\n");
- break;
- case EFCT_HW_PORT_SHUTDOWN:
- if (!sli_cmd_down_link(&hw->sli, link))
- rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
- efct_hw_cb_port_control, NULL);
- /* Free buffer on error, since no callback is coming */
- if (rc)
- efc_log_err(hw->os, "DOWN_LINK failed\n");
- break;
- default:
- efc_log_debug(hw->os, "unhandled control %#x\n", ctrl);
- break;
- }
- return rc;
- }
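- /*
- * Illustrative sketch (not part of the original source): bringing the
- * link up after initialization. The helper name is hypothetical; no
- * callback is passed since the EFCT_HW_PORT_INIT path above supplies its
- * own efct_hw_cb_port_control().
- */
- static int
- efct_hw_example_link_up(struct efct_hw *hw)
- {
-         /* the low byte of value carries the loop reset ALPA */
-         return efct_hw_port_control(hw, EFCT_HW_PORT_INIT, 0, NULL, NULL);
- }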
- void
- efct_hw_teardown(struct efct_hw *hw)
- {
- u32 i = 0;
- u32 destroy_queues;
- u32 free_memory;
- struct efc_dma *dma;
- struct efct *efct = hw->os;
- destroy_queues = (hw->state == EFCT_HW_STATE_ACTIVE);
- free_memory = (hw->state != EFCT_HW_STATE_UNINITIALIZED);
- /* Cancel Sliport Healthcheck */
- if (hw->sliport_healthcheck) {
- hw->sliport_healthcheck = 0;
- efct_hw_config_sli_port_health_check(hw, 0, 0);
- }
- if (hw->state != EFCT_HW_STATE_QUEUES_ALLOCATED) {
- hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
- efct_hw_flush(hw);
- if (list_empty(&hw->cmd_head))
- efc_log_debug(hw->os,
- "All commands completed on MQ queue\n");
- else
- efc_log_debug(hw->os,
- "Some cmds still pending on MQ queue\n");
- /* Cancel any remaining commands */
- efct_hw_command_cancel(hw);
- } else {
- hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
- }
- dma_free_coherent(&efct->pci->dev,
- hw->rnode_mem.size, hw->rnode_mem.virt,
- hw->rnode_mem.phys);
- memset(&hw->rnode_mem, 0, sizeof(struct efc_dma));
- if (hw->io) {
- for (i = 0; i < hw->config.n_io; i++) {
- if (hw->io[i] && hw->io[i]->sgl &&
- hw->io[i]->sgl->virt) {
- dma_free_coherent(&efct->pci->dev,
- hw->io[i]->sgl->size,
- hw->io[i]->sgl->virt,
- hw->io[i]->sgl->phys);
- }
- kfree(hw->io[i]);
- hw->io[i] = NULL;
- }
- kfree(hw->io);
- hw->io = NULL;
- kfree(hw->wqe_buffs);
- hw->wqe_buffs = NULL;
- }
- dma = &hw->xfer_rdy;
- dma_free_coherent(&efct->pci->dev,
- dma->size, dma->virt, dma->phys);
- memset(dma, 0, sizeof(struct efc_dma));
- dma = &hw->loop_map;
- dma_free_coherent(&efct->pci->dev,
- dma->size, dma->virt, dma->phys);
- memset(dma, 0, sizeof(struct efc_dma));
- for (i = 0; i < hw->wq_count; i++)
- sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues,
- free_memory);
- for (i = 0; i < hw->rq_count; i++)
- sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues,
- free_memory);
- for (i = 0; i < hw->mq_count; i++)
- sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues,
- free_memory);
- for (i = 0; i < hw->cq_count; i++)
- sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues,
- free_memory);
- for (i = 0; i < hw->eq_count; i++)
- sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues,
- free_memory);
- /* Free rq buffers */
- efct_hw_rx_free(hw);
- efct_hw_queue_teardown(hw);
- kfree(hw->wq_cpu_array);
- sli_teardown(&hw->sli);
- /* record the fact that the queues are non-functional */
- hw->state = EFCT_HW_STATE_UNINITIALIZED;
- /* free sequence free pool */
- kfree(hw->seq_pool);
- hw->seq_pool = NULL;
- /* free hw_wq_callback pool */
- efct_hw_reqtag_pool_free(hw);
- mempool_destroy(hw->cmd_ctx_pool);
- mempool_destroy(hw->mbox_rqst_pool);
- /* Mark HW setup as not having been called */
- hw->hw_setup_called = false;
- }
- static int
- efct_hw_sli_reset(struct efct_hw *hw, enum efct_hw_reset reset,
- enum efct_hw_state prev_state)
- {
- int rc = 0;
- switch (reset) {
- case EFCT_HW_RESET_FUNCTION:
- efc_log_debug(hw->os, "issuing function level reset\n");
- if (sli_reset(&hw->sli)) {
- efc_log_err(hw->os, "sli_reset failed\n");
- rc = -EIO;
- }
- break;
- case EFCT_HW_RESET_FIRMWARE:
- efc_log_debug(hw->os, "issuing firmware reset\n");
- if (sli_fw_reset(&hw->sli)) {
- efc_log_err(hw->os, "sli_soft_reset failed\n");
- rc = -EIO;
- }
- /*
- * Because the FW reset leaves the FW in a non-running state,
- * follow that with a regular reset.
- */
- efc_log_debug(hw->os, "issuing function level reset\n");
- if (sli_reset(&hw->sli)) {
- efc_log_err(hw->os, "sli_reset failed\n");
- rc = -EIO;
- }
- break;
- default:
- efc_log_err(hw->os, "unknown type - no reset performed\n");
- hw->state = prev_state;
- rc = -EINVAL;
- break;
- }
- return rc;
- }
- int
- efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset)
- {
- int rc = 0;
- enum efct_hw_state prev_state = hw->state;
- if (hw->state != EFCT_HW_STATE_ACTIVE)
- efc_log_debug(hw->os,
- "HW state %d is not active\n", hw->state);
- hw->state = EFCT_HW_STATE_RESET_IN_PROGRESS;
- /*
- * If the prev_state is already reset/teardown in progress,
- * don't continue further
- */
- if (prev_state == EFCT_HW_STATE_RESET_IN_PROGRESS ||
- prev_state == EFCT_HW_STATE_TEARDOWN_IN_PROGRESS)
- return efct_hw_sli_reset(hw, reset, prev_state);
- if (prev_state != EFCT_HW_STATE_UNINITIALIZED) {
- efct_hw_flush(hw);
- if (list_empty(&hw->cmd_head))
- efc_log_debug(hw->os,
- "All commands completed on MQ queue\n");
- else
- efc_log_err(hw->os,
- "Some commands still pending on MQ queue\n");
- }
- /* Reset the chip */
- rc = efct_hw_sli_reset(hw, reset, prev_state);
- if (rc == -EINVAL)
- return -EIO;
- return rc;
- }
|