- // SPDX-License-Identifier: GPL-2.0
- /*
- * NVMe over Fabrics TCP host.
- * Copyright (c) 2018 Lightbits Labs. All rights reserved.
- */
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
- #include <linux/module.h>
- #include <linux/init.h>
- #include <linux/slab.h>
- #include <linux/err.h>
- #include <linux/nvme-tcp.h>
- #include <net/sock.h>
- #include <net/tcp.h>
- #include <linux/blk-mq.h>
- #include <crypto/hash.h>
- #include <net/busy_poll.h>
- #include "nvme.h"
- #include "fabrics.h"
- struct nvme_tcp_queue;
- /* Define the socket priority to use for connections where it is desirable
- * that the NIC consider performing optimized packet processing or filtering.
- * A non-zero value is sufficient to indicate general consideration of any
- * possible optimization. Making it a module param allows for alternative
- * values that may be unique for some NIC implementations.
- */
- static int so_priority;
- module_param(so_priority, int, 0644);
- MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- /* lockdep can detect a circular dependency of the form
- * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
- * because dependencies are tracked for both nvme-tcp and user contexts. Using
- * a separate class prevents lockdep from conflating nvme-tcp socket use with
- * user-space socket API use.
- */
- static struct lock_class_key nvme_tcp_sk_key[2];
- static struct lock_class_key nvme_tcp_slock_key[2];
- static void nvme_tcp_reclassify_socket(struct socket *sock)
- {
- struct sock *sk = sock->sk;
- if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
- return;
- switch (sk->sk_family) {
- case AF_INET:
- sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
- &nvme_tcp_slock_key[0],
- "sk_lock-AF_INET-NVME",
- &nvme_tcp_sk_key[0]);
- break;
- case AF_INET6:
- sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
- &nvme_tcp_slock_key[1],
- "sk_lock-AF_INET6-NVME",
- &nvme_tcp_sk_key[1]);
- break;
- default:
- WARN_ON_ONCE(1);
- }
- }
- #else
- static void nvme_tcp_reclassify_socket(struct socket *sock) { }
- #endif
- enum nvme_tcp_send_state {
- NVME_TCP_SEND_CMD_PDU = 0,
- NVME_TCP_SEND_H2C_PDU,
- NVME_TCP_SEND_DATA,
- NVME_TCP_SEND_DDGST,
- };
- struct nvme_tcp_request {
- struct nvme_request req;
- void *pdu;
- struct nvme_tcp_queue *queue;
- u32 data_len;
- u32 pdu_len;
- u32 pdu_sent;
- u32 h2cdata_left;
- u32 h2cdata_offset;
- u16 ttag;
- __le16 status;
- struct list_head entry;
- struct llist_node lentry;
- __le32 ddgst;
- struct bio *curr_bio;
- struct iov_iter iter;
- /* send state */
- size_t offset;
- size_t data_sent;
- enum nvme_tcp_send_state state;
- };
- enum nvme_tcp_queue_flags {
- NVME_TCP_Q_ALLOCATED = 0,
- NVME_TCP_Q_LIVE = 1,
- NVME_TCP_Q_POLLING = 2,
- };
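- /*
- * Receive-side states: each queue first collects a full PDU header,
- * then (for C2HData) the payload, then an optional trailing data
- * digest, before nvme_tcp_init_recv_ctx() re-arms it for the next PDU.
- */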
- enum nvme_tcp_recv_state {
- NVME_TCP_RECV_PDU = 0,
- NVME_TCP_RECV_DATA,
- NVME_TCP_RECV_DDGST,
- };
- struct nvme_tcp_ctrl;
- struct nvme_tcp_queue {
- struct socket *sock;
- struct work_struct io_work;
- int io_cpu;
- struct mutex queue_lock;
- struct mutex send_mutex;
- struct llist_head req_list;
- struct list_head send_list;
- /* recv state */
- void *pdu;
- int pdu_remaining;
- int pdu_offset;
- size_t data_remaining;
- size_t ddgst_remaining;
- unsigned int nr_cqe;
- /* send state */
- struct nvme_tcp_request *request;
- u32 maxh2cdata;
- size_t cmnd_capsule_len;
- struct nvme_tcp_ctrl *ctrl;
- unsigned long flags;
- bool rd_enabled;
- bool hdr_digest;
- bool data_digest;
- struct ahash_request *rcv_hash;
- struct ahash_request *snd_hash;
- __le32 exp_ddgst;
- __le32 recv_ddgst;
- struct page_frag_cache pf_cache;
- void (*state_change)(struct sock *);
- void (*data_ready)(struct sock *);
- void (*write_space)(struct sock *);
- };
- struct nvme_tcp_ctrl {
- /* read only in the hot path */
- struct nvme_tcp_queue *queues;
- struct blk_mq_tag_set tag_set;
- /* other member variables */
- struct list_head list;
- struct blk_mq_tag_set admin_tag_set;
- struct sockaddr_storage addr;
- struct sockaddr_storage src_addr;
- struct nvme_ctrl ctrl;
- struct work_struct err_work;
- struct delayed_work connect_work;
- struct nvme_tcp_request async_req;
- u32 io_queues[HCTX_MAX_TYPES];
- };
- static LIST_HEAD(nvme_tcp_ctrl_list);
- static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
- static struct workqueue_struct *nvme_tcp_wq;
- static const struct blk_mq_ops nvme_tcp_mq_ops;
- static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
- static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
- static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
- {
- return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
- }
- static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
- {
- return queue - queue->ctrl->queues;
- }
- static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
- {
- u32 queue_idx = nvme_tcp_queue_id(queue);
- if (queue_idx == 0)
- return queue->ctrl->admin_tag_set.tags[queue_idx];
- return queue->ctrl->tag_set.tags[queue_idx - 1];
- }
- static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
- {
- return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
- }
- static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
- {
- return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
- }
- static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
- {
- if (nvme_is_fabrics(req->req.cmd))
- return NVME_TCP_ADMIN_CCSZ;
- return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
- }
- static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
- {
- return req == &req->queue->ctrl->async_req;
- }
- static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
- {
- struct request *rq;
- if (unlikely(nvme_tcp_async_req(req)))
- return false; /* async events don't have a request */
- rq = blk_mq_rq_from_pdu(req);
- return rq_data_dir(rq) == WRITE && req->data_len &&
- req->data_len <= nvme_tcp_inline_data_size(req);
- }
- static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
- {
- return req->iter.bvec->bv_page;
- }
- static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
- {
- return req->iter.bvec->bv_offset + req->iter.iov_offset;
- }
- static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
- {
- return min_t(size_t, iov_iter_single_seg_count(&req->iter),
- req->pdu_len - req->pdu_sent);
- }
- static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
- {
- return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
- req->pdu_len - req->pdu_sent : 0;
- }
- static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
- int len)
- {
- return nvme_tcp_pdu_data_left(req) <= len;
- }
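- /*
- * Point req->iter at the request payload: either the single special
- * payload bvec (e.g. a discard) or the bvecs of the current bio,
- * honouring any partially consumed bytes (bi_bvec_done).
- */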
- static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
- unsigned int dir)
- {
- struct request *rq = blk_mq_rq_from_pdu(req);
- struct bio_vec *vec;
- unsigned int size;
- int nr_bvec;
- size_t offset;
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
- vec = &rq->special_vec;
- nr_bvec = 1;
- size = blk_rq_payload_bytes(rq);
- offset = 0;
- } else {
- struct bio *bio = req->curr_bio;
- struct bvec_iter bi;
- struct bio_vec bv;
- vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
- nr_bvec = 0;
- bio_for_each_bvec(bv, bio, bi) {
- nr_bvec++;
- }
- size = bio->bi_iter.bi_size;
- offset = bio->bi_iter.bi_bvec_done;
- }
- iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
- req->iter.iov_offset = offset;
- }
- static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
- int len)
- {
- req->data_sent += len;
- req->pdu_sent += len;
- iov_iter_advance(&req->iter, len);
- if (!iov_iter_count(&req->iter) &&
- req->data_sent < req->data_len) {
- req->curr_bio = req->curr_bio->bi_next;
- nvme_tcp_init_iter(req, ITER_SOURCE);
- }
- }
- static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
- {
- int ret;
- /* drain the send queue as much as we can... */
- do {
- ret = nvme_tcp_try_send(queue);
- } while (ret > 0);
- }
- static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
- {
- return !list_empty(&queue->send_list) ||
- !llist_empty(&queue->req_list);
- }
- static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
- bool sync, bool last)
- {
- struct nvme_tcp_queue *queue = req->queue;
- bool empty;
- empty = llist_add(&req->lentry, &queue->req_list) &&
- list_empty(&queue->send_list) && !queue->request;
- /*
- * If we're the first on the send_list, try to send directly;
- * otherwise queue io_work. Also, only do that if we are on the
- * same CPU, so we don't introduce contention.
- */
- if (queue->io_cpu == raw_smp_processor_id() &&
- sync && empty && mutex_trylock(&queue->send_mutex)) {
- nvme_tcp_send_all(queue);
- mutex_unlock(&queue->send_mutex);
- }
- if (last && nvme_tcp_queue_more(queue))
- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
- }
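- /*
- * Splice everything queued locklessly on req_list over to send_list.
- * llist_del_all() hands back the nodes in reverse (LIFO) order and
- * list_add() prepends, so the double reversal restores FIFO
- * submission order on send_list.
- */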
- static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
- {
- struct nvme_tcp_request *req;
- struct llist_node *node;
- for (node = llist_del_all(&queue->req_list); node; node = node->next) {
- req = llist_entry(node, struct nvme_tcp_request, lentry);
- list_add(&req->entry, &queue->send_list);
- }
- }
- static inline struct nvme_tcp_request *
- nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
- {
- struct nvme_tcp_request *req;
- req = list_first_entry_or_null(&queue->send_list,
- struct nvme_tcp_request, entry);
- if (!req) {
- nvme_tcp_process_req_list(queue);
- req = list_first_entry_or_null(&queue->send_list,
- struct nvme_tcp_request, entry);
- if (unlikely(!req))
- return NULL;
- }
- list_del(&req->entry);
- return req;
- }
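- /*
- * Digest helpers: NVMe/TCP uses CRC32C for both the header digest
- * (HDGST) and the data digest (DDGST). nvme_tcp_hdgst() stores the
- * computed digest directly behind the header at pdu + len.
- */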
- static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
- __le32 *dgst)
- {
- ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
- crypto_ahash_final(hash);
- }
- static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
- struct page *page, off_t off, size_t len)
- {
- struct scatterlist sg;
- sg_init_table(&sg, 1);
- sg_set_page(&sg, page, len, off);
- ahash_request_set_crypt(hash, &sg, NULL, len);
- crypto_ahash_update(hash);
- }
- static inline void nvme_tcp_hdgst(struct ahash_request *hash,
- void *pdu, size_t len)
- {
- struct scatterlist sg;
- sg_init_one(&sg, pdu, len);
- ahash_request_set_crypt(hash, &sg, pdu + len, len);
- crypto_ahash_digest(hash);
- }
- static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
- void *pdu, size_t pdu_len)
- {
- struct nvme_tcp_hdr *hdr = pdu;
- __le32 recv_digest;
- __le32 exp_digest;
- if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
- dev_err(queue->ctrl->ctrl.device,
- "queue %d: header digest flag is cleared\n",
- nvme_tcp_queue_id(queue));
- return -EPROTO;
- }
- recv_digest = *(__le32 *)(pdu + hdr->hlen);
- nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
- exp_digest = *(__le32 *)(pdu + hdr->hlen);
- if (recv_digest != exp_digest) {
- dev_err(queue->ctrl->ctrl.device,
- "header digest error: recv %#x expected %#x\n",
- le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
- return -EIO;
- }
- return 0;
- }
- static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
- {
- struct nvme_tcp_hdr *hdr = pdu;
- u8 digest_len = nvme_tcp_hdgst_len(queue);
- u32 len;
- len = le32_to_cpu(hdr->plen) - hdr->hlen -
- ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
- if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
- dev_err(queue->ctrl->ctrl.device,
- "queue %d: data digest flag is cleared\n",
- nvme_tcp_queue_id(queue));
- return -EPROTO;
- }
- crypto_ahash_init(queue->rcv_hash);
- return 0;
- }
- static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
- struct request *rq, unsigned int hctx_idx)
- {
- struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- page_frag_free(req->pdu);
- }
- static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
- struct request *rq, unsigned int hctx_idx,
- unsigned int numa_node)
- {
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
- struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_tcp_cmd_pdu *pdu;
- int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
- struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
- u8 hdgst = nvme_tcp_hdgst_len(queue);
- req->pdu = page_frag_alloc(&queue->pf_cache,
- sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
- GFP_KERNEL | __GFP_ZERO);
- if (!req->pdu)
- return -ENOMEM;
- pdu = req->pdu;
- req->queue = queue;
- nvme_req(rq)->ctrl = &ctrl->ctrl;
- nvme_req(rq)->cmd = &pdu->cmd;
- return 0;
- }
- static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
- {
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
- struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
- hctx->driver_data = queue;
- return 0;
- }
- static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
- {
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
- struct nvme_tcp_queue *queue = &ctrl->queues[0];
- hctx->driver_data = queue;
- return 0;
- }
- static enum nvme_tcp_recv_state
- nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
- {
- return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
- (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
- NVME_TCP_RECV_DATA;
- }
- static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
- {
- queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
- nvme_tcp_hdgst_len(queue);
- queue->pdu_offset = 0;
- queue->data_remaining = -1;
- queue->ddgst_remaining = 0;
- }
- static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
- {
- if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
- return;
- dev_warn(ctrl->device, "starting error recovery\n");
- queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
- }
- static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
- struct nvme_completion *cqe)
- {
- struct nvme_tcp_request *req;
- struct request *rq;
- rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
- if (!rq) {
- dev_err(queue->ctrl->ctrl.device,
- "got bad cqe.command_id %#x on queue %d\n",
- cqe->command_id, nvme_tcp_queue_id(queue));
- nvme_tcp_error_recovery(&queue->ctrl->ctrl);
- return -EINVAL;
- }
- req = blk_mq_rq_to_pdu(rq);
- if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
- req->status = cqe->status;
- if (!nvme_try_complete_req(rq, req->status, cqe->result))
- nvme_complete_rq(rq);
- queue->nr_cqe++;
- return 0;
- }
- static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
- struct nvme_tcp_data_pdu *pdu)
- {
- struct request *rq;
- rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
- if (!rq) {
- dev_err(queue->ctrl->ctrl.device,
- "got bad c2hdata.command_id %#x on queue %d\n",
- pdu->command_id, nvme_tcp_queue_id(queue));
- return -ENOENT;
- }
- if (!blk_rq_payload_bytes(rq)) {
- dev_err(queue->ctrl->ctrl.device,
- "queue %d tag %#x unexpected data\n",
- nvme_tcp_queue_id(queue), rq->tag);
- return -EIO;
- }
- queue->data_remaining = le32_to_cpu(pdu->data_length);
- if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
- unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
- dev_err(queue->ctrl->ctrl.device,
- "queue %d tag %#x SUCCESS set but not last PDU\n",
- nvme_tcp_queue_id(queue), rq->tag);
- nvme_tcp_error_recovery(&queue->ctrl->ctrl);
- return -EPROTO;
- }
- return 0;
- }
- static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
- struct nvme_tcp_rsp_pdu *pdu)
- {
- struct nvme_completion *cqe = &pdu->cqe;
- int ret = 0;
- /*
- * AEN requests are special as they don't time out and can
- * survive any kind of queue freeze and often don't respond to
- * aborts. We don't even bother to allocate a struct request
- * for them but rather special case them here.
- */
- if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
- cqe->command_id)))
- nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
- &cqe->result);
- else
- ret = nvme_tcp_process_nvme_cqe(queue, cqe);
- return ret;
- }
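- /*
- * Build the next H2CData PDU for a write. Transfers larger than the
- * MAXH2CDATA value negotiated at connection setup are split into
- * multiple PDUs, and only the final one carries NVME_TCP_F_DATA_LAST.
- * For example, a 64K R2T against a 16K maxh2cdata goes out as four
- * 16K H2CData PDUs.
- */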
- static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
- {
- struct nvme_tcp_data_pdu *data = req->pdu;
- struct nvme_tcp_queue *queue = req->queue;
- struct request *rq = blk_mq_rq_from_pdu(req);
- u32 h2cdata_sent = req->pdu_len;
- u8 hdgst = nvme_tcp_hdgst_len(queue);
- u8 ddgst = nvme_tcp_ddgst_len(queue);
- req->state = NVME_TCP_SEND_H2C_PDU;
- req->offset = 0;
- req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
- req->pdu_sent = 0;
- req->h2cdata_left -= req->pdu_len;
- req->h2cdata_offset += h2cdata_sent;
- memset(data, 0, sizeof(*data));
- data->hdr.type = nvme_tcp_h2c_data;
- if (!req->h2cdata_left)
- data->hdr.flags = NVME_TCP_F_DATA_LAST;
- if (queue->hdr_digest)
- data->hdr.flags |= NVME_TCP_F_HDGST;
- if (queue->data_digest)
- data->hdr.flags |= NVME_TCP_F_DDGST;
- data->hdr.hlen = sizeof(*data);
- data->hdr.pdo = data->hdr.hlen + hdgst;
- data->hdr.plen =
- cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
- data->ttag = req->ttag;
- data->command_id = nvme_cid(rq);
- data->data_offset = cpu_to_le32(req->h2cdata_offset);
- data->data_length = cpu_to_le32(req->pdu_len);
- }
- static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
- struct nvme_tcp_r2t_pdu *pdu)
- {
- struct nvme_tcp_request *req;
- struct request *rq;
- u32 r2t_length = le32_to_cpu(pdu->r2t_length);
- u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
- rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
- if (!rq) {
- dev_err(queue->ctrl->ctrl.device,
- "got bad r2t.command_id %#x on queue %d\n",
- pdu->command_id, nvme_tcp_queue_id(queue));
- return -ENOENT;
- }
- req = blk_mq_rq_to_pdu(rq);
- if (unlikely(!r2t_length)) {
- dev_err(queue->ctrl->ctrl.device,
- "req %d r2t len is %u, probably a bug...\n",
- rq->tag, r2t_length);
- return -EPROTO;
- }
- if (unlikely(req->data_sent + r2t_length > req->data_len)) {
- dev_err(queue->ctrl->ctrl.device,
- "req %d r2t len %u exceeded data len %u (%zu sent)\n",
- rq->tag, r2t_length, req->data_len, req->data_sent);
- return -EPROTO;
- }
- if (unlikely(r2t_offset < req->data_sent)) {
- dev_err(queue->ctrl->ctrl.device,
- "req %d unexpected r2t offset %u (expected %zu)\n",
- rq->tag, r2t_offset, req->data_sent);
- return -EPROTO;
- }
- req->pdu_len = 0;
- req->h2cdata_left = r2t_length;
- req->h2cdata_offset = r2t_offset;
- req->ttag = pdu->ttag;
- nvme_tcp_setup_h2c_data_pdu(req);
- nvme_tcp_queue_request(req, false, true);
- return 0;
- }
- static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
- unsigned int *offset, size_t *len)
- {
- struct nvme_tcp_hdr *hdr;
- char *pdu = queue->pdu;
- size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
- int ret;
- ret = skb_copy_bits(skb, *offset,
- &pdu[queue->pdu_offset], rcv_len);
- if (unlikely(ret))
- return ret;
- queue->pdu_remaining -= rcv_len;
- queue->pdu_offset += rcv_len;
- *offset += rcv_len;
- *len -= rcv_len;
- if (queue->pdu_remaining)
- return 0;
- hdr = queue->pdu;
- if (queue->hdr_digest) {
- ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
- if (unlikely(ret))
- return ret;
- }
- if (queue->data_digest) {
- ret = nvme_tcp_check_ddgst(queue, queue->pdu);
- if (unlikely(ret))
- return ret;
- }
- switch (hdr->type) {
- case nvme_tcp_c2h_data:
- return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
- case nvme_tcp_rsp:
- nvme_tcp_init_recv_ctx(queue);
- return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
- case nvme_tcp_r2t:
- nvme_tcp_init_recv_ctx(queue);
- return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
- default:
- dev_err(queue->ctrl->ctrl.device,
- "unsupported pdu type (%d)\n", hdr->type);
- return -EINVAL;
- }
- }
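- /*
- * Complete a request with an NVMe status code. Bit 0 of the
- * completion status field is the phase tag, so the status code
- * proper is shifted up by one bit.
- */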
- static inline void nvme_tcp_end_request(struct request *rq, u16 status)
- {
- union nvme_result res = {};
- if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
- nvme_complete_rq(rq);
- }
- static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
- unsigned int *offset, size_t *len)
- {
- struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
- struct request *rq =
- nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
- struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- while (true) {
- int recv_len, ret;
- recv_len = min_t(size_t, *len, queue->data_remaining);
- if (!recv_len)
- break;
- if (!iov_iter_count(&req->iter)) {
- req->curr_bio = req->curr_bio->bi_next;
- /*
- * If we don't have any bios it means that the controller
- * sent more data than we requested, hence error
- */
- if (!req->curr_bio) {
- dev_err(queue->ctrl->ctrl.device,
- "queue %d no space in request %#x",
- nvme_tcp_queue_id(queue), rq->tag);
- nvme_tcp_init_recv_ctx(queue);
- return -EIO;
- }
- nvme_tcp_init_iter(req, ITER_DEST);
- }
- /* we can read only from what is left in this bio */
- recv_len = min_t(size_t, recv_len,
- iov_iter_count(&req->iter));
- if (queue->data_digest)
- ret = skb_copy_and_hash_datagram_iter(skb, *offset,
- &req->iter, recv_len, queue->rcv_hash);
- else
- ret = skb_copy_datagram_iter(skb, *offset,
- &req->iter, recv_len);
- if (ret) {
- dev_err(queue->ctrl->ctrl.device,
- "queue %d failed to copy request %#x data",
- nvme_tcp_queue_id(queue), rq->tag);
- return ret;
- }
- *len -= recv_len;
- *offset += recv_len;
- queue->data_remaining -= recv_len;
- }
- if (!queue->data_remaining) {
- if (queue->data_digest) {
- nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
- queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
- } else {
- if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- nvme_tcp_end_request(rq,
- le16_to_cpu(req->status));
- queue->nr_cqe++;
- }
- nvme_tcp_init_recv_ctx(queue);
- }
- }
- return 0;
- }
- static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
- struct sk_buff *skb, unsigned int *offset, size_t *len)
- {
- struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
- char *ddgst = (char *)&queue->recv_ddgst;
- size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
- off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
- int ret;
- ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
- if (unlikely(ret))
- return ret;
- queue->ddgst_remaining -= recv_len;
- *offset += recv_len;
- *len -= recv_len;
- if (queue->ddgst_remaining)
- return 0;
- if (queue->recv_ddgst != queue->exp_ddgst) {
- struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
- struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
- dev_err(queue->ctrl->ctrl.device,
- "data digest error: recv %#x expected %#x\n",
- le32_to_cpu(queue->recv_ddgst),
- le32_to_cpu(queue->exp_ddgst));
- }
- if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
- struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- nvme_tcp_end_request(rq, le16_to_cpu(req->status));
- queue->nr_cqe++;
- }
- nvme_tcp_init_recv_ctx(queue);
- return 0;
- }
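- /*
- * ->read_sock() callback: consume as much of the skb as the current
- * receive state allows, dispatching to the PDU/DATA/DDGST handlers.
- * Any handler error disables further reads and kicks error recovery.
- */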
- static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
- unsigned int offset, size_t len)
- {
- struct nvme_tcp_queue *queue = desc->arg.data;
- size_t consumed = len;
- int result;
- while (len) {
- switch (nvme_tcp_recv_state(queue)) {
- case NVME_TCP_RECV_PDU:
- result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
- break;
- case NVME_TCP_RECV_DATA:
- result = nvme_tcp_recv_data(queue, skb, &offset, &len);
- break;
- case NVME_TCP_RECV_DDGST:
- result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
- break;
- default:
- result = -EFAULT;
- }
- if (result) {
- dev_err(queue->ctrl->ctrl.device,
- "receive failed: %d\n", result);
- queue->rd_enabled = false;
- nvme_tcp_error_recovery(&queue->ctrl->ctrl);
- return result;
- }
- }
- return consumed;
- }
- static void nvme_tcp_data_ready(struct sock *sk)
- {
- struct nvme_tcp_queue *queue;
- read_lock_bh(&sk->sk_callback_lock);
- queue = sk->sk_user_data;
- if (likely(queue && queue->rd_enabled) &&
- !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
- read_unlock_bh(&sk->sk_callback_lock);
- }
- static void nvme_tcp_write_space(struct sock *sk)
- {
- struct nvme_tcp_queue *queue;
- read_lock_bh(&sk->sk_callback_lock);
- queue = sk->sk_user_data;
- if (likely(queue && sk_stream_is_writeable(sk))) {
- clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
- }
- read_unlock_bh(&sk->sk_callback_lock);
- }
- static void nvme_tcp_state_change(struct sock *sk)
- {
- struct nvme_tcp_queue *queue;
- read_lock_bh(&sk->sk_callback_lock);
- queue = sk->sk_user_data;
- if (!queue)
- goto done;
- switch (sk->sk_state) {
- case TCP_CLOSE:
- case TCP_CLOSE_WAIT:
- case TCP_LAST_ACK:
- case TCP_FIN_WAIT1:
- case TCP_FIN_WAIT2:
- nvme_tcp_error_recovery(&queue->ctrl->ctrl);
- break;
- default:
- dev_info(queue->ctrl->ctrl.device,
- "queue %d socket state %d\n",
- nvme_tcp_queue_id(queue), sk->sk_state);
- }
- queue->state_change(sk);
- done:
- read_unlock_bh(&sk->sk_callback_lock);
- }
- static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
- {
- queue->request = NULL;
- }
- static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
- {
- if (nvme_tcp_async_req(req)) {
- union nvme_result res = {};
- nvme_complete_async_event(&req->queue->ctrl->ctrl,
- cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
- } else {
- nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
- NVME_SC_HOST_PATH_ERROR);
- }
- }
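- /*
- * Push payload pages out via kernel_sendpage() where the page allows
- * zero-copy (sendpage_ok()), else fall back to sock_no_sendpage().
- * MSG_MORE/MSG_SENDPAGE_NOTLAST keep TCP from flushing a partial PDU;
- * MSG_EOR marks the end of the last PDU when nothing else is queued.
- */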
- static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
- {
- struct nvme_tcp_queue *queue = req->queue;
- int req_data_len = req->data_len;
- u32 h2cdata_left = req->h2cdata_left;
- while (true) {
- struct page *page = nvme_tcp_req_cur_page(req);
- size_t offset = nvme_tcp_req_cur_offset(req);
- size_t len = nvme_tcp_req_cur_length(req);
- bool last = nvme_tcp_pdu_last_send(req, len);
- int req_data_sent = req->data_sent;
- int ret, flags = MSG_DONTWAIT;
- if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
- flags |= MSG_EOR;
- else
- flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
- if (sendpage_ok(page)) {
- ret = kernel_sendpage(queue->sock, page, offset, len,
- flags);
- } else {
- ret = sock_no_sendpage(queue->sock, page, offset, len,
- flags);
- }
- if (ret <= 0)
- return ret;
- if (queue->data_digest)
- nvme_tcp_ddgst_update(queue->snd_hash, page,
- offset, ret);
- /*
- * Update the request iterator, except for the last payload send
- * in the request, where we don't want to modify it as we may
- * compete with the RX path completing the request.
- */
- if (req_data_sent + ret < req_data_len)
- nvme_tcp_advance_req(req, ret);
- /* fully successful last send in current PDU */
- if (last && ret == len) {
- if (queue->data_digest) {
- nvme_tcp_ddgst_final(queue->snd_hash,
- &req->ddgst);
- req->state = NVME_TCP_SEND_DDGST;
- req->offset = 0;
- } else {
- if (h2cdata_left)
- nvme_tcp_setup_h2c_data_pdu(req);
- else
- nvme_tcp_done_send_req(queue);
- }
- return 1;
- }
- }
- return -EAGAIN;
- }
- static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
- {
- struct nvme_tcp_queue *queue = req->queue;
- struct nvme_tcp_cmd_pdu *pdu = req->pdu;
- bool inline_data = nvme_tcp_has_inline_data(req);
- u8 hdgst = nvme_tcp_hdgst_len(queue);
- int len = sizeof(*pdu) + hdgst - req->offset;
- int flags = MSG_DONTWAIT;
- int ret;
- if (inline_data || nvme_tcp_queue_more(queue))
- flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
- else
- flags |= MSG_EOR;
- if (queue->hdr_digest && !req->offset)
- nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
- ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
- offset_in_page(pdu) + req->offset, len, flags);
- if (unlikely(ret <= 0))
- return ret;
- len -= ret;
- if (!len) {
- if (inline_data) {
- req->state = NVME_TCP_SEND_DATA;
- if (queue->data_digest)
- crypto_ahash_init(queue->snd_hash);
- } else {
- nvme_tcp_done_send_req(queue);
- }
- return 1;
- }
- req->offset += ret;
- return -EAGAIN;
- }
- static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
- {
- struct nvme_tcp_queue *queue = req->queue;
- struct nvme_tcp_data_pdu *pdu = req->pdu;
- u8 hdgst = nvme_tcp_hdgst_len(queue);
- int len = sizeof(*pdu) - req->offset + hdgst;
- int ret;
- if (queue->hdr_digest && !req->offset)
- nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
- if (!req->h2cdata_left)
- ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
- offset_in_page(pdu) + req->offset, len,
- MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
- else
- ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
- offset_in_page(pdu) + req->offset, len,
- MSG_DONTWAIT | MSG_MORE);
- if (unlikely(ret <= 0))
- return ret;
- len -= ret;
- if (!len) {
- req->state = NVME_TCP_SEND_DATA;
- if (queue->data_digest)
- crypto_ahash_init(queue->snd_hash);
- return 1;
- }
- req->offset += ret;
- return -EAGAIN;
- }
- static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
- {
- struct nvme_tcp_queue *queue = req->queue;
- size_t offset = req->offset;
- u32 h2cdata_left = req->h2cdata_left;
- int ret;
- struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
- struct kvec iov = {
- .iov_base = (u8 *)&req->ddgst + req->offset,
- .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
- };
- if (nvme_tcp_queue_more(queue))
- msg.msg_flags |= MSG_MORE;
- else
- msg.msg_flags |= MSG_EOR;
- ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
- if (unlikely(ret <= 0))
- return ret;
- if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
- if (h2cdata_left)
- nvme_tcp_setup_h2c_data_pdu(req);
- else
- nvme_tcp_done_send_req(queue);
- return 1;
- }
- req->offset += ret;
- return -EAGAIN;
- }
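- /*
- * Send-side state machine: CMD PDU -> (optional H2C PDU) -> DATA ->
- * (optional DDGST). Each state deliberately falls through to the next
- * one within the same invocation once its send completes in full.
- */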
- static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
- {
- struct nvme_tcp_request *req;
- unsigned int noreclaim_flag;
- int ret = 1;
- if (!queue->request) {
- queue->request = nvme_tcp_fetch_request(queue);
- if (!queue->request)
- return 0;
- }
- req = queue->request;
- noreclaim_flag = memalloc_noreclaim_save();
- if (req->state == NVME_TCP_SEND_CMD_PDU) {
- ret = nvme_tcp_try_send_cmd_pdu(req);
- if (ret <= 0)
- goto done;
- if (!nvme_tcp_has_inline_data(req))
- goto out;
- }
- if (req->state == NVME_TCP_SEND_H2C_PDU) {
- ret = nvme_tcp_try_send_data_pdu(req);
- if (ret <= 0)
- goto done;
- }
- if (req->state == NVME_TCP_SEND_DATA) {
- ret = nvme_tcp_try_send_data(req);
- if (ret <= 0)
- goto done;
- }
- if (req->state == NVME_TCP_SEND_DDGST)
- ret = nvme_tcp_try_send_ddgst(req);
- done:
- if (ret == -EAGAIN) {
- ret = 0;
- } else if (ret < 0) {
- dev_err(queue->ctrl->ctrl.device,
- "failed to send request %d\n", ret);
- nvme_tcp_fail_request(queue->request);
- nvme_tcp_done_send_req(queue);
- }
- out:
- memalloc_noreclaim_restore(noreclaim_flag);
- return ret;
- }
- static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
- {
- struct socket *sock = queue->sock;
- struct sock *sk = sock->sk;
- read_descriptor_t rd_desc;
- int consumed;
- rd_desc.arg.data = queue;
- rd_desc.count = 1;
- lock_sock(sk);
- queue->nr_cqe = 0;
- consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
- release_sock(sk);
- return consumed;
- }
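- /*
- * Per-queue worker: alternate between sending and receiving for up to
- * a 1ms quota, then reschedule so a busy queue cannot monopolize the
- * workqueue CPU.
- */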
- static void nvme_tcp_io_work(struct work_struct *w)
- {
- struct nvme_tcp_queue *queue =
- container_of(w, struct nvme_tcp_queue, io_work);
- unsigned long deadline = jiffies + msecs_to_jiffies(1);
- do {
- bool pending = false;
- int result;
- if (mutex_trylock(&queue->send_mutex)) {
- result = nvme_tcp_try_send(queue);
- mutex_unlock(&queue->send_mutex);
- if (result > 0)
- pending = true;
- else if (unlikely(result < 0))
- break;
- }
- result = nvme_tcp_try_recv(queue);
- if (result > 0)
- pending = true;
- else if (unlikely(result < 0))
- return;
- if (!pending || !queue->rd_enabled)
- return;
- } while (!time_after(jiffies, deadline)); /* quota is exhausted */
- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
- }
- static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
- {
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
- ahash_request_free(queue->rcv_hash);
- ahash_request_free(queue->snd_hash);
- crypto_free_ahash(tfm);
- }
- static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
- {
- struct crypto_ahash *tfm;
- tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
- queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!queue->snd_hash)
- goto free_tfm;
- ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
- queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!queue->rcv_hash)
- goto free_snd_hash;
- ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
- return 0;
- free_snd_hash:
- ahash_request_free(queue->snd_hash);
- free_tfm:
- crypto_free_ahash(tfm);
- return -ENOMEM;
- }
- static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
- {
- struct nvme_tcp_request *async = &ctrl->async_req;
- page_frag_free(async->pdu);
- }
- static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
- {
- struct nvme_tcp_queue *queue = &ctrl->queues[0];
- struct nvme_tcp_request *async = &ctrl->async_req;
- u8 hdgst = nvme_tcp_hdgst_len(queue);
- async->pdu = page_frag_alloc(&queue->pf_cache,
- sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
- GFP_KERNEL | __GFP_ZERO);
- if (!async->pdu)
- return -ENOMEM;
- async->queue = &ctrl->queues[0];
- return 0;
- }
- static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
- {
- struct page *page;
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- unsigned int noreclaim_flag;
- if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
- return;
- if (queue->hdr_digest || queue->data_digest)
- nvme_tcp_free_crypto(queue);
- if (queue->pf_cache.va) {
- page = virt_to_head_page(queue->pf_cache.va);
- __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
- queue->pf_cache.va = NULL;
- }
- noreclaim_flag = memalloc_noreclaim_save();
- sock_release(queue->sock);
- memalloc_noreclaim_restore(noreclaim_flag);
- kfree(queue->pdu);
- mutex_destroy(&queue->send_mutex);
- mutex_destroy(&queue->queue_lock);
- }
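- /*
- * Initial connection handshake: send an ICReq PDU and validate the
- * controller's ICResp, checking the protocol version, that the digest
- * settings match what the host requested, the CPDA, and that the
- * advertised MAXH2CDATA is a multiple of dwords and at least
- * NVME_TCP_MIN_MAXH2CDATA.
- */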
- static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
- {
- struct nvme_tcp_icreq_pdu *icreq;
- struct nvme_tcp_icresp_pdu *icresp;
- struct msghdr msg = {};
- struct kvec iov;
- bool ctrl_hdgst, ctrl_ddgst;
- u32 maxh2cdata;
- int ret;
- icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
- if (!icreq)
- return -ENOMEM;
- icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
- if (!icresp) {
- ret = -ENOMEM;
- goto free_icreq;
- }
- icreq->hdr.type = nvme_tcp_icreq;
- icreq->hdr.hlen = sizeof(*icreq);
- icreq->hdr.pdo = 0;
- icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
- icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
- icreq->maxr2t = 0; /* single inflight r2t supported */
- icreq->hpda = 0; /* no alignment constraint */
- if (queue->hdr_digest)
- icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
- if (queue->data_digest)
- icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
- iov.iov_base = icreq;
- iov.iov_len = sizeof(*icreq);
- ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
- if (ret < 0)
- goto free_icresp;
- memset(&msg, 0, sizeof(msg));
- iov.iov_base = icresp;
- iov.iov_len = sizeof(*icresp);
- ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
- iov.iov_len, msg.msg_flags);
- if (ret < 0)
- goto free_icresp;
- ret = -EINVAL;
- if (icresp->hdr.type != nvme_tcp_icresp) {
- pr_err("queue %d: bad type returned %d\n",
- nvme_tcp_queue_id(queue), icresp->hdr.type);
- goto free_icresp;
- }
- if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
- pr_err("queue %d: bad pdu length returned %d\n",
- nvme_tcp_queue_id(queue), icresp->hdr.plen);
- goto free_icresp;
- }
- if (icresp->pfv != NVME_TCP_PFV_1_0) {
- pr_err("queue %d: bad pfv returned %d\n",
- nvme_tcp_queue_id(queue), icresp->pfv);
- goto free_icresp;
- }
- ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
- if ((queue->data_digest && !ctrl_ddgst) ||
- (!queue->data_digest && ctrl_ddgst)) {
- pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
- nvme_tcp_queue_id(queue),
- queue->data_digest ? "enabled" : "disabled",
- ctrl_ddgst ? "enabled" : "disabled");
- goto free_icresp;
- }
- ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
- if ((queue->hdr_digest && !ctrl_hdgst) ||
- (!queue->hdr_digest && ctrl_hdgst)) {
- pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
- nvme_tcp_queue_id(queue),
- queue->hdr_digest ? "enabled" : "disabled",
- ctrl_hdgst ? "enabled" : "disabled");
- goto free_icresp;
- }
- if (icresp->cpda != 0) {
- pr_err("queue %d: unsupported cpda returned %d\n",
- nvme_tcp_queue_id(queue), icresp->cpda);
- goto free_icresp;
- }
- maxh2cdata = le32_to_cpu(icresp->maxdata);
- if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
- pr_err("queue %d: invalid maxh2cdata returned %u\n",
- nvme_tcp_queue_id(queue), maxh2cdata);
- goto free_icresp;
- }
- queue->maxh2cdata = maxh2cdata;
- ret = 0;
- free_icresp:
- kfree(icresp);
- free_icreq:
- kfree(icreq);
- return ret;
- }
- static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
- {
- return nvme_tcp_queue_id(queue) == 0;
- }
- static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
- {
- struct nvme_tcp_ctrl *ctrl = queue->ctrl;
- int qid = nvme_tcp_queue_id(queue);
- return !nvme_tcp_admin_queue(queue) &&
- qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
- }
- static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
- {
- struct nvme_tcp_ctrl *ctrl = queue->ctrl;
- int qid = nvme_tcp_queue_id(queue);
- return !nvme_tcp_admin_queue(queue) &&
- !nvme_tcp_default_queue(queue) &&
- qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
- ctrl->io_queues[HCTX_TYPE_READ];
- }
- static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
- {
- struct nvme_tcp_ctrl *ctrl = queue->ctrl;
- int qid = nvme_tcp_queue_id(queue);
- return !nvme_tcp_admin_queue(queue) &&
- !nvme_tcp_default_queue(queue) &&
- !nvme_tcp_read_queue(queue) &&
- qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
- ctrl->io_queues[HCTX_TYPE_READ] +
- ctrl->io_queues[HCTX_TYPE_POLL];
- }
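- /*
- * Map a queue to a CPU. Queues of each type are laid out sequentially
- * (defaults, then reads, then polls), so compute the queue's index n
- * within its type and pick the n-th online CPU. For example, with 4
- * default and 4 read queues, read queue qid 6 gets n = 6 - 4 - 1 = 1.
- */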
- static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
- {
- struct nvme_tcp_ctrl *ctrl = queue->ctrl;
- int qid = nvme_tcp_queue_id(queue);
- int n = 0;
- if (nvme_tcp_default_queue(queue))
- n = qid - 1;
- else if (nvme_tcp_read_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
- else if (nvme_tcp_poll_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
- ctrl->io_queues[HCTX_TYPE_READ] - 1;
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
- }
- static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
- {
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- int ret, rcv_pdu_size;
- mutex_init(&queue->queue_lock);
- queue->ctrl = ctrl;
- init_llist_head(&queue->req_list);
- INIT_LIST_HEAD(&queue->send_list);
- mutex_init(&queue->send_mutex);
- INIT_WORK(&queue->io_work, nvme_tcp_io_work);
- if (qid > 0)
- queue->cmnd_capsule_len = nctrl->ioccsz * 16;
- else
- queue->cmnd_capsule_len = sizeof(struct nvme_command) +
- NVME_TCP_ADMIN_CCSZ;
- ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
- IPPROTO_TCP, &queue->sock);
- if (ret) {
- dev_err(nctrl->device,
- "failed to create socket: %d\n", ret);
- goto err_destroy_mutex;
- }
- nvme_tcp_reclassify_socket(queue->sock);
- /* Single SYN retry */
- tcp_sock_set_syncnt(queue->sock->sk, 1);
- /* Set TCP no delay */
- tcp_sock_set_nodelay(queue->sock->sk);
- /*
- * Cleanup whatever is sitting in the TCP transmit queue on socket
- * close. This is done to prevent stale data from being sent should
- * the network connection be restored before TCP times out.
- */
- sock_no_linger(queue->sock->sk);
- if (so_priority > 0)
- sock_set_priority(queue->sock->sk, so_priority);
- /* Set socket type of service */
- if (nctrl->opts->tos >= 0)
- ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
- /* Set a 10 second timeout for icresp recvmsg */
- queue->sock->sk->sk_rcvtimeo = 10 * HZ;
- queue->sock->sk->sk_allocation = GFP_ATOMIC;
- nvme_tcp_set_queue_io_cpu(queue);
- queue->request = NULL;
- queue->data_remaining = 0;
- queue->ddgst_remaining = 0;
- queue->pdu_remaining = 0;
- queue->pdu_offset = 0;
- sk_set_memalloc(queue->sock->sk);
- if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
- ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
- sizeof(ctrl->src_addr));
- if (ret) {
- dev_err(nctrl->device,
- "failed to bind queue %d socket %d\n",
- qid, ret);
- goto err_sock;
- }
- }
- if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
- char *iface = nctrl->opts->host_iface;
- sockptr_t optval = KERNEL_SOCKPTR(iface);
- ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
- optval, strlen(iface));
- if (ret) {
- dev_err(nctrl->device,
- "failed to bind to interface %s queue %d err %d\n",
- iface, qid, ret);
- goto err_sock;
- }
- }
- queue->hdr_digest = nctrl->opts->hdr_digest;
- queue->data_digest = nctrl->opts->data_digest;
- if (queue->hdr_digest || queue->data_digest) {
- ret = nvme_tcp_alloc_crypto(queue);
- if (ret) {
- dev_err(nctrl->device,
- "failed to allocate queue %d crypto\n", qid);
- goto err_sock;
- }
- }
- rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
- nvme_tcp_hdgst_len(queue);
- queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
- if (!queue->pdu) {
- ret = -ENOMEM;
- goto err_crypto;
- }
- dev_dbg(nctrl->device, "connecting queue %d\n",
- nvme_tcp_queue_id(queue));
- ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
- sizeof(ctrl->addr), 0);
- if (ret) {
- dev_err(nctrl->device,
- "failed to connect socket: %d\n", ret);
- goto err_rcv_pdu;
- }
- ret = nvme_tcp_init_connection(queue);
- if (ret)
- goto err_init_connect;
- set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
- return 0;
- err_init_connect:
- kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- err_rcv_pdu:
- kfree(queue->pdu);
- err_crypto:
- if (queue->hdr_digest || queue->data_digest)
- nvme_tcp_free_crypto(queue);
- err_sock:
- sock_release(queue->sock);
- queue->sock = NULL;
- err_destroy_mutex:
- mutex_destroy(&queue->send_mutex);
- mutex_destroy(&queue->queue_lock);
- return ret;
- }
- static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
- {
- struct socket *sock = queue->sock;
- write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_user_data = NULL;
- sock->sk->sk_data_ready = queue->data_ready;
- sock->sk->sk_state_change = queue->state_change;
- sock->sk->sk_write_space = queue->write_space;
- write_unlock_bh(&sock->sk->sk_callback_lock);
- }
- static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
- {
- kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- nvme_tcp_restore_sock_ops(queue);
- cancel_work_sync(&queue->io_work);
- }
- static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
- {
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
- return;
- mutex_lock(&queue->queue_lock);
- if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
- __nvme_tcp_stop_queue(queue);
- mutex_unlock(&queue->queue_lock);
- }
- static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
- {
- write_lock_bh(&queue->sock->sk->sk_callback_lock);
- queue->sock->sk->sk_user_data = queue;
- queue->state_change = queue->sock->sk->sk_state_change;
- queue->data_ready = queue->sock->sk->sk_data_ready;
- queue->write_space = queue->sock->sk->sk_write_space;
- queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
- queue->sock->sk->sk_state_change = nvme_tcp_state_change;
- queue->sock->sk->sk_write_space = nvme_tcp_write_space;
- #ifdef CONFIG_NET_RX_BUSY_POLL
- queue->sock->sk->sk_ll_usec = 1;
- #endif
- write_unlock_bh(&queue->sock->sk->sk_callback_lock);
- }
- static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
- {
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct nvme_tcp_queue *queue = &ctrl->queues[idx];
- int ret;
- queue->rd_enabled = true;
- nvme_tcp_init_recv_ctx(queue);
- nvme_tcp_setup_sock_ops(queue);
- if (idx)
- ret = nvmf_connect_io_queue(nctrl, idx);
- else
- ret = nvmf_connect_admin_queue(nctrl);
- if (!ret) {
- set_bit(NVME_TCP_Q_LIVE, &queue->flags);
- } else {
- if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
- __nvme_tcp_stop_queue(queue);
- dev_err(nctrl->device,
- "failed to connect queue: %d ret=%d\n", idx, ret);
- }
- return ret;
- }
- static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
- {
- if (to_tcp_ctrl(ctrl)->async_req.pdu) {
- cancel_work_sync(&ctrl->async_event_work);
- nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
- to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
- }
- nvme_tcp_free_queue(ctrl, 0);
- }
- static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
- {
- int i;
- for (i = 1; i < ctrl->queue_count; i++)
- nvme_tcp_free_queue(ctrl, i);
- }
- static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
- {
- int i;
- for (i = 1; i < ctrl->queue_count; i++)
- nvme_tcp_stop_queue(ctrl, i);
- }
- static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
- int first, int last)
- {
- int i, ret;
- for (i = first; i < last; i++) {
- ret = nvme_tcp_start_queue(ctrl, i);
- if (ret)
- goto out_stop_queues;
- }
- return 0;
- out_stop_queues:
- for (i--; i >= first; i--)
- nvme_tcp_stop_queue(ctrl, i);
- return ret;
- }
- static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
- {
- int ret;
- ret = nvme_tcp_alloc_queue(ctrl, 0);
- if (ret)
- return ret;
- ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
- if (ret)
- goto out_free_queue;
- return 0;
- out_free_queue:
- nvme_tcp_free_queue(ctrl, 0);
- return ret;
- }
- static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
- {
- int i, ret;
- for (i = 1; i < ctrl->queue_count; i++) {
- ret = nvme_tcp_alloc_queue(ctrl, i);
- if (ret)
- goto out_free_queues;
- }
- return 0;
- out_free_queues:
- for (i--; i >= 1; i--)
- nvme_tcp_free_queue(ctrl, i);
- return ret;
- }
- static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
- {
- unsigned int nr_io_queues;
- nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
- nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
- nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
- return nr_io_queues;
- }
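- /*
- * Distribute the granted queue count over the HCTX types. For
- * example, a request for 4 read, 4 write and 2 poll queues that is
- * granted 8 queues in total ends up with 4 read, 4 default (write)
- * and no poll queues.
- */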
- static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
- unsigned int nr_io_queues)
- {
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct nvmf_ctrl_options *opts = nctrl->opts;
- if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
- /*
- * Separate read/write queues: hand out dedicated default queues
- * only after we have sufficient read queues.
- */
- ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
- ctrl->io_queues[HCTX_TYPE_DEFAULT] =
- min(opts->nr_write_queues, nr_io_queues);
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
- } else {
- /*
- * Shared read/write queues: either no write queues were requested,
- * or we don't have sufficient queue count to have dedicated
- * default queues.
- */
- ctrl->io_queues[HCTX_TYPE_DEFAULT] =
- min(opts->nr_io_queues, nr_io_queues);
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
- }
- if (opts->nr_poll_queues && nr_io_queues) {
- /* map dedicated poll queues only if we have queues left */
- ctrl->io_queues[HCTX_TYPE_POLL] =
- min(opts->nr_poll_queues, nr_io_queues);
- }
- }
- static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
- {
- unsigned int nr_io_queues;
- int ret;
- nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
- ret = nvme_set_queue_count(ctrl, &nr_io_queues);
- if (ret)
- return ret;
- if (nr_io_queues == 0) {
- dev_err(ctrl->device,
- "unable to set any I/O queues\n");
- return -ENOMEM;
- }
- ctrl->queue_count = nr_io_queues + 1;
- dev_info(ctrl->device,
- "creating %d I/O queues.\n", nr_io_queues);
- nvme_tcp_set_io_queues(ctrl, nr_io_queues);
- return __nvme_tcp_alloc_io_queues(ctrl);
- }
- static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
- {
- nvme_tcp_stop_io_queues(ctrl);
- if (remove)
- nvme_remove_io_tag_set(ctrl);
- nvme_tcp_free_io_queues(ctrl);
- }
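
/*
 * Allocate and start the I/O queues.  On a fresh controller this also
 * creates the I/O tag set; on a reconnect the existing tag set is kept
 * and the hardware queue count is updated under a queue freeze, since
 * the target may grant a different number of queues this time around.
 */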
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
	int ret, nr_queues;

	ret = nvme_tcp_alloc_io_queues(ctrl);
	if (ret)
		return ret;

	if (new) {
		ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
				&nvme_tcp_mq_ops,
				ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
				sizeof(struct nvme_tcp_request));
		if (ret)
			goto out_free_io_queues;
	}

	/*
	 * Only start IO queues for which we have allocated the tagset
	 * and limited it to the available queues. On reconnects, the
	 * queue number might have changed.
	 */
	nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
	ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
	if (ret)
		goto out_cleanup_connect_q;

	if (!new) {
		nvme_start_freeze(ctrl);
		nvme_start_queues(ctrl);
		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
			/*
			 * If we timed out waiting for freeze we are likely to
			 * be stuck.  Fail the controller initialization just
			 * to be safe.
			 */
			ret = -ENODEV;
			nvme_unfreeze(ctrl);
			goto out_wait_freeze_timed_out;
		}
		blk_mq_update_nr_hw_queues(ctrl->tagset,
			ctrl->queue_count - 1);
		nvme_unfreeze(ctrl);
	}

	/*
	 * If the number of queues has increased (reconnect case)
	 * start all new queues now.
	 */
	ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
				       ctrl->tagset->nr_hw_queues + 1);
	if (ret)
		goto out_wait_freeze_timed_out;

	return 0;

out_wait_freeze_timed_out:
	nvme_stop_queues(ctrl);
	nvme_sync_io_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
out_cleanup_connect_q:
	nvme_cancel_tagset(ctrl);
	if (new)
		nvme_remove_io_tag_set(ctrl);
out_free_io_queues:
	nvme_tcp_free_io_queues(ctrl);
	return ret;
}

static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
	nvme_tcp_stop_queue(ctrl, 0);
	if (remove)
		nvme_remove_admin_tag_set(ctrl);
	nvme_tcp_free_admin_queue(ctrl);
}
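
/*
 * Allocate, connect and enable the admin queue, then finish basic
 * controller initialization.  Each failure label unwinds exactly the
 * steps that already completed, in reverse order.
 */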
static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
{
	int error;

	error = nvme_tcp_alloc_admin_queue(ctrl);
	if (error)
		return error;

	if (new) {
		error = nvme_alloc_admin_tag_set(ctrl,
				&to_tcp_ctrl(ctrl)->admin_tag_set,
				&nvme_tcp_admin_mq_ops,
				sizeof(struct nvme_tcp_request));
		if (error)
			goto out_free_queue;
	}

	error = nvme_tcp_start_queue(ctrl, 0);
	if (error)
		goto out_cleanup_tagset;

	error = nvme_enable_ctrl(ctrl);
	if (error)
		goto out_stop_queue;

	nvme_start_admin_queue(ctrl);

	error = nvme_init_ctrl_finish(ctrl);
	if (error)
		goto out_quiesce_queue;

	return 0;

out_quiesce_queue:
	nvme_stop_admin_queue(ctrl);
	blk_sync_queue(ctrl->admin_q);
out_stop_queue:
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
out_cleanup_tagset:
	if (new)
		nvme_remove_admin_tag_set(ctrl);
out_free_queue:
	nvme_tcp_free_admin_queue(ctrl);
	return error;
}

static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
		bool remove)
{
	nvme_stop_admin_queue(ctrl);
	blk_sync_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
	if (remove)
		nvme_start_admin_queue(ctrl);
	nvme_tcp_destroy_admin_queue(ctrl, remove);
}

static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
		bool remove)
{
	if (ctrl->queue_count <= 1)
		return;
	nvme_stop_admin_queue(ctrl);
	nvme_stop_queues(ctrl);
	nvme_sync_io_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
	nvme_cancel_tagset(ctrl);
	if (remove)
		nvme_start_queues(ctrl);
	nvme_tcp_destroy_io_queues(ctrl, remove);
}
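
/*
 * Decide what happens after losing the connection: schedule another
 * reconnect attempt while nvmf_should_reconnect() allows it, otherwise
 * give up and delete the controller.  Only acts in CONNECTING state;
 * resets and deletes in flight are left alone.
 */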
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
{
	/* If we are resetting/deleting then do nothing */
	if (ctrl->state != NVME_CTRL_CONNECTING) {
		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
			ctrl->state == NVME_CTRL_LIVE);
		return;
	}

	if (nvmf_should_reconnect(ctrl)) {
		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
			ctrl->opts->reconnect_delay);
		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
				ctrl->opts->reconnect_delay * HZ);
	} else {
		dev_info(ctrl->device, "Removing controller...\n");
		nvme_delete_ctrl(ctrl);
	}
}
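
/*
 * Common controller bring-up for both the initial connect (new = true)
 * and reconnects: set up the admin queue, validate transport-specific
 * controller capabilities (no ICDOFF, mandatory SGL support), clamp the
 * queue size, then bring up the I/O queues and move the controller to
 * LIVE.
 */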
static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = nvme_tcp_configure_admin_queue(ctrl, new);
	if (ret)
		return ret;

	if (ctrl->icdoff) {
		ret = -EOPNOTSUPP;
		dev_err(ctrl->device, "icdoff is not supported!\n");
		goto destroy_admin;
	}

	if (!nvme_ctrl_sgl_supported(ctrl)) {
		ret = -EOPNOTSUPP;
		dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
		goto destroy_admin;
	}

	if (opts->queue_size > ctrl->sqsize + 1)
		dev_warn(ctrl->device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->sqsize + 1);

	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
		dev_warn(ctrl->device,
			"sqsize %u > ctrl maxcmd %u, clamping down\n",
			ctrl->sqsize + 1, ctrl->maxcmd);
		ctrl->sqsize = ctrl->maxcmd - 1;
	}

	if (ctrl->queue_count > 1) {
		ret = nvme_tcp_configure_io_queues(ctrl, new);
		if (ret)
			goto destroy_admin;
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
		/*
		 * state change failure is ok if we started ctrl delete,
		 * unless we're during creation of a new controller to
		 * avoid races with teardown flow.
		 */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		WARN_ON_ONCE(new);
		ret = -EINVAL;
		goto destroy_io;
	}

	nvme_start_ctrl(ctrl);
	return 0;

destroy_io:
	if (ctrl->queue_count > 1) {
		nvme_stop_queues(ctrl);
		nvme_sync_io_queues(ctrl);
		nvme_tcp_stop_io_queues(ctrl);
		nvme_cancel_tagset(ctrl);
		nvme_tcp_destroy_io_queues(ctrl, new);
	}
destroy_admin:
	nvme_stop_admin_queue(ctrl);
	blk_sync_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
	nvme_tcp_destroy_admin_queue(ctrl, new);
	return ret;
}

static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
			struct nvme_tcp_ctrl, connect_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	++ctrl->nr_reconnects;

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto requeue;

	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
			ctrl->nr_reconnects);

	ctrl->nr_reconnects = 0;

	return;

requeue:
	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
			ctrl->nr_reconnects);
	nvme_tcp_reconnect_or_remove(ctrl);
}
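
/*
 * Error recovery: tear down all queues, failing fast any pending
 * requests, then transition to CONNECTING and let the reconnect logic
 * decide whether the controller comes back or gets removed.
 */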
static void nvme_tcp_error_recovery_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
				struct nvme_tcp_ctrl, err_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	nvme_tcp_teardown_io_queues(ctrl, false);
	/* unquiesce to fail fast pending requests */
	nvme_start_queues(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, false);
	nvme_start_admin_queue(ctrl);
	nvme_auth_stop(ctrl);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
	nvme_tcp_teardown_io_queues(ctrl, shutdown);
	nvme_stop_admin_queue(ctrl);
	if (shutdown)
		nvme_shutdown_ctrl(ctrl);
	else
		nvme_disable_ctrl(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_tcp_teardown_ctrl(ctrl, true);
}

static void nvme_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, reset_work);

	nvme_stop_ctrl(ctrl);
	nvme_tcp_teardown_ctrl(ctrl, false);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto out_fail;

	return;

out_fail:
	++ctrl->nr_reconnects;
	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
{
	flush_work(&to_tcp_ctrl(ctrl)->err_work);
	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
}

static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl->queues);
	kfree(ctrl);
}
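
/*
 * The following helpers initialize the command's SGL descriptor for the
 * three NVMe/TCP data layouts: no data, in-capsule (inline) data, and
 * data transferred separately from the command capsule.
 */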
static void nvme_tcp_set_sg_null(struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = 0;
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
		struct nvme_command *c, u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}

static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
		u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}
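
/*
 * Build and queue the Asynchronous Event Request on the admin queue.
 * The command uses the reserved NVME_AQ_BLK_MQ_DEPTH command id and
 * carries no data, so its send state is set up directly without going
 * through the block layer.
 */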
static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
	struct nvme_command *cmd = &pdu->cmd;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	memset(pdu, 0, sizeof(*pdu));
	pdu->hdr.type = nvme_tcp_cmd;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_tcp_set_sg_null(cmd);

	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
	ctrl->async_req.offset = 0;
	ctrl->async_req.curr_bio = NULL;
	ctrl->async_req.data_len = 0;

	nvme_tcp_queue_request(&ctrl->async_req, true, true);
}

static void nvme_tcp_complete_timed_out(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
	nvmf_complete_timed_out_request(rq);
}
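
/*
 * Request timeout handler.  Outside of LIVE state the request is
 * completed immediately so it cannot block controller setup or
 * teardown; in LIVE state, error recovery is kicked instead and the
 * timer is re-armed so recovery can complete the request.
 */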
static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;

	dev_warn(ctrl->device,
		"queue %d: timeout request %#x type %d\n",
		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);

	if (ctrl->state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		nvme_tcp_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_tcp_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}

static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		nvme_tcp_set_sg_null(c);
	else if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(req))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}
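
/*
 * Translate a block layer request into an NVMe/TCP command PDU: reset
 * the per-request send state, size the in-capsule data (writes only,
 * up to the inline data size) and fill in the PDU header, including
 * the optional header/data digest lengths.
 */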
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->status = cpu_to_le16(NVME_SC_SUCCESS);
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->h2cdata_left = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;
	if (req->curr_bio && req->data_len)
		nvme_tcp_init_iter(req, rq_data_dir(rq));

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(req))
		req->pdu_len = req->data_len;

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}
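
/*
 * Map block layer hardware contexts onto the TCP queues.  With
 * dedicated write queues, the default and read maps get disjoint queue
 * ranges; otherwise both map onto the same shared range.  Poll queues,
 * when present, always sit after the default and read queues.
 */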
static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
				ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);
}

static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}

static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
	struct sockaddr_storage src_addr;
	int ret, len;

	len = nvmf_get_address(ctrl, buf, size);

	mutex_lock(&queue->queue_lock);

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		goto done;
	ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
	if (ret > 0) {
		if (len > 0)
			len--; /* strip trailing newline */
		len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
				(len) ? "," : "", &src_addr);
	}
done:
	mutex_unlock(&queue->queue_lock);

	return len;
}

static const struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.commit_rqs	= nvme_tcp_commit_rqs,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS | NVME_F_BLOCKING,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvme_tcp_get_address,
	.stop_ctrl		= nvme_tcp_stop_ctrl,
};
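
/*
 * Check whether a controller matching these connection options already
 * exists; used to reject duplicate connects unless the user explicitly
 * asked for them.
 */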
static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (opts->mask & NVMF_OPT_HOST_IFACE) {
		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
			pr_err("invalid interface passed: %s\n",
			       opts->host_iface);
			ret = -ENODEV;
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
	.create_ctrl	= nvme_tcp_create_ctrl,
};

static int __init nvme_tcp_init_module(void)
{
	int ret;

	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	ret = nvmf_register_transport(&nvme_tcp_transport);
	if (ret) {
		destroy_workqueue(nvme_tcp_wq);
		return ret;
	}
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");