apple.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Apple ANS NVM Express device driver
  4. * Copyright The Asahi Linux Contributors
  5. *
  6. * Based on the pci.c NVM Express device driver
  7. * Copyright (c) 2011-2014, Intel Corporation.
  8. * and on the rdma.c NVMe over Fabrics RDMA host code.
  9. * Copyright (c) 2015-2016 HGST, a Western Digital Company.
  10. */
  11. #include <linux/async.h>
  12. #include <linux/blkdev.h>
  13. #include <linux/blk-mq.h>
  14. #include <linux/device.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/dmapool.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/io-64-nonatomic-lo-hi.h>
  19. #include <linux/io.h>
  20. #include <linux/iopoll.h>
  21. #include <linux/jiffies.h>
  22. #include <linux/mempool.h>
  23. #include <linux/module.h>
  24. #include <linux/of.h>
  25. #include <linux/of_platform.h>
  26. #include <linux/once.h>
  27. #include <linux/platform_device.h>
  28. #include <linux/pm_domain.h>
  29. #include <linux/soc/apple/rtkit.h>
  30. #include <linux/soc/apple/sart.h>
  31. #include <linux/reset.h>
  32. #include <linux/time64.h>
  33. #include "nvme.h"
  34. #define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC
  35. #define APPLE_ANS_MAX_QUEUE_DEPTH 64
  36. #define APPLE_ANS_COPROC_CPU_CONTROL 0x44
  37. #define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
  38. #define APPLE_ANS_ACQ_DB 0x1004
  39. #define APPLE_ANS_IOCQ_DB 0x100c
  40. #define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210
  41. #define APPLE_ANS_BOOT_STATUS 0x1300
  42. #define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55
  43. #define APPLE_ANS_UNKNOWN_CTRL 0x24008
  44. #define APPLE_ANS_PRP_NULL_CHECK BIT(11)
  45. #define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
  46. #define APPLE_ANS_LINEAR_SQ_EN BIT(0)
  47. #define APPLE_ANS_LINEAR_ASQ_DB 0x2490c
  48. #define APPLE_ANS_LINEAR_IOSQ_DB 0x24910
  49. #define APPLE_NVMMU_NUM_TCBS 0x28100
  50. #define APPLE_NVMMU_ASQ_TCB_BASE 0x28108
  51. #define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
  52. #define APPLE_NVMMU_TCB_INVAL 0x28118
  53. #define APPLE_NVMMU_TCB_STAT 0x28120
  54. /*
  55. * This controller is a bit weird in the way command tags work: both the
  56. * admin and the IO queue share the same tag space. Additionally, tags
  57. * cannot be higher than 0x40, which effectively limits the combined
  58. * queue depth to 0x40. Instead of wasting half of that on the admin queue,
  59. * which gets much less traffic, we reduce its size here.
  60. * The controller also doesn't support async events, so no space needs to
  61. * be reserved for NVME_NR_AEN_COMMANDS.
  62. */
  63. #define APPLE_NVME_AQ_DEPTH 2
  64. #define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
  65. /*
  66. * These can be higher, but we need to ensure that no command requires
  67. * an sg allocation that needs more than a page of data.
  68. */
  69. #define NVME_MAX_KB_SZ 4096
  70. #define NVME_MAX_SEGS 127
  71. /*
  72. * This controller comes with an embedded IOMMU known as NVMMU.
  73. * The NVMMU is pointed to an array of TCBs indexed by the command tag.
  74. * Each command must be configured inside this structure before it's allowed
  75. * to execute, including commands that don't require DMA transfers.
  76. *
  77. * An exception to this are Apple's vendor-specific commands (opcode 0xD8 on the
  78. * admin queue): Those commands must still be added to the NVMMU but the DMA
  79. * buffers cannot be represented as PRPs and must instead be allowlisted using SART.
  80. *
  81. * Programming the PRPs to the same values as those in the submission queue
  82. * looks rather silly at first. This hardware is however designed for a kernel
  83. * that runs the NVMMU code in a higher exception level than the NVMe driver.
  84. * In that setting the NVMe driver first programs the submission queue entry
  85. * and then executes a hypercall to the code that is allowed to program the
  86. * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
  87. * verifying that they don't point to kernel text, data, pagetables, or similar
  88. * protected areas before programming the TCB to point to this shadow copy.
  89. * Since Linux doesn't do any of that we may as well just point both the queue
  90. * and the TCB PRP pointer to the same memory.
  91. */
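/*
* Hardware layout of a single NVMMU TCB entry. Each entry is 128 bytes (see
* the static_assert below); the per-queue TCB array bases are programmed into
* APPLE_NVMMU_ASQ_TCB_BASE / APPLE_NVMMU_IOSQ_TCB_BASE during reset and the
* array is indexed by command tag.
*/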
  92. struct apple_nvmmu_tcb {
  93. u8 opcode;
  94. #define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
  95. #define APPLE_ANS_TCB_DMA_TO_DEVICE BIT(1)
  96. u8 dma_flags;
  97. u8 command_id;
  98. u8 _unk0;
  99. __le16 length;
  100. u8 _unk1[18];
  101. __le64 prp1;
  102. __le64 prp2;
  103. u8 _unk2[16];
  104. u8 aes_iv[8];
  105. u8 _aes_unk[64];
  106. };
  107. /*
  108. * The Apple NVMe controller only supports a single admin and a single IO queue
  109. * which are both limited to 64 entries and share a single interrupt.
  110. *
  111. * The completion queue works as usual. The submission "queue" instead is
  112. * an array indexed by the command tag on this hardware. Commands must also be
  113. * present in the NVMMU's TCB array. They are triggered by writing their tag to
  114. * an MMIO register.
  115. */
  116. struct apple_nvme_queue {
  117. struct nvme_command *sqes;
  118. struct nvme_completion *cqes;
  119. struct apple_nvmmu_tcb *tcbs;
  120. dma_addr_t sq_dma_addr;
  121. dma_addr_t cq_dma_addr;
  122. dma_addr_t tcb_dma_addr;
  123. u32 __iomem *sq_db;
  124. u32 __iomem *cq_db;
  125. u16 cq_head;
  126. u8 cq_phase;
  127. bool is_adminq;
  128. bool enabled;
  129. };
  130. /*
  131. * The apple_nvme_iod describes the data in an I/O.
  132. *
  133. * The sg pointer contains the list of PRP chunk allocations in addition
  134. * to the actual struct scatterlist.
  135. */
  136. struct apple_nvme_iod {
  137. struct nvme_request req;
  138. struct nvme_command cmd;
  139. struct apple_nvme_queue *q;
  140. int npages; /* In the PRP list. 0 means small pool in use */
  141. int nents; /* Used in scatterlist */
  142. dma_addr_t first_dma;
  143. unsigned int dma_len; /* length of single DMA segment mapping */
  144. struct scatterlist *sg;
  145. };
  146. struct apple_nvme {
  147. struct device *dev;
  148. void __iomem *mmio_coproc;
  149. void __iomem *mmio_nvme;
  150. struct device **pd_dev;
  151. struct device_link **pd_link;
  152. int pd_count;
  153. struct apple_sart *sart;
  154. struct apple_rtkit *rtk;
  155. struct reset_control *reset;
  156. struct dma_pool *prp_page_pool;
  157. struct dma_pool *prp_small_pool;
  158. mempool_t *iod_mempool;
  159. struct nvme_ctrl ctrl;
  160. struct work_struct remove_work;
  161. struct apple_nvme_queue adminq;
  162. struct apple_nvme_queue ioq;
  163. struct blk_mq_tag_set admin_tagset;
  164. struct blk_mq_tag_set tagset;
  165. int irq;
  166. spinlock_t lock;
  167. };
  168. static_assert(sizeof(struct nvme_command) == 64);
  169. static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
  170. static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
  171. {
  172. return container_of(ctrl, struct apple_nvme, ctrl);
  173. }
  174. static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
  175. {
  176. if (q->is_adminq)
  177. return container_of(q, struct apple_nvme, adminq);
  178. else
  179. return container_of(q, struct apple_nvme, ioq);
  180. }
  181. static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
  182. {
  183. if (q->is_adminq)
  184. return APPLE_NVME_AQ_DEPTH;
  185. else
  186. return APPLE_ANS_MAX_QUEUE_DEPTH;
  187. }
  188. static void apple_nvme_rtkit_crashed(void *cookie)
  189. {
  190. struct apple_nvme *anv = cookie;
  191. dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot");
  192. nvme_reset_ctrl(&anv->ctrl);
  193. }
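/*
* RTKit shared memory buffers are always allocated by the driver; requests
* from the co-processor for a fixed IOVA (bfr->iova already set) are not
* supported. Every buffer additionally has to be added to the SART allow
* list before ANS may access it.
*/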
  194. static int apple_nvme_sart_dma_setup(void *cookie,
  195. struct apple_rtkit_shmem *bfr)
  196. {
  197. struct apple_nvme *anv = cookie;
  198. int ret;
  199. if (bfr->iova)
  200. return -EINVAL;
  201. if (!bfr->size)
  202. return -EINVAL;
  203. bfr->buffer =
  204. dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL);
  205. if (!bfr->buffer)
  206. return -ENOMEM;
  207. ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size);
  208. if (ret) {
  209. dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
  210. bfr->buffer = NULL;
  211. return -ENOMEM;
  212. }
  213. return 0;
  214. }
  215. static void apple_nvme_sart_dma_destroy(void *cookie,
  216. struct apple_rtkit_shmem *bfr)
  217. {
  218. struct apple_nvme *anv = cookie;
  219. apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
  220. dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
  221. }
  222. static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
  223. .crashed = apple_nvme_rtkit_crashed,
  224. .shmem_setup = apple_nvme_sart_dma_setup,
  225. .shmem_destroy = apple_nvme_sart_dma_destroy,
  226. };
  227. static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
  228. {
  229. struct apple_nvme *anv = queue_to_apple_nvme(q);
  230. writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
  231. if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
  232. dev_warn_ratelimited(anv->dev,
  233. "NVMMU TCB invalidation failed\n");
  234. }
  235. static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
  236. struct nvme_command *cmd)
  237. {
  238. struct apple_nvme *anv = queue_to_apple_nvme(q);
  239. u32 tag = nvme_tag_from_cid(cmd->common.command_id);
  240. struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];
  241. tcb->opcode = cmd->common.opcode;
  242. tcb->prp1 = cmd->common.dptr.prp1;
  243. tcb->prp2 = cmd->common.dptr.prp2;
  244. tcb->length = cmd->rw.length;
  245. tcb->command_id = tag;
  246. if (nvme_is_write(cmd))
  247. tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
  248. else
  249. tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;
  250. memcpy(&q->sqes[tag], cmd, sizeof(*cmd));
  251. /*
  252. * This lock here doesn't make much sense at first glance, but
  253. * removing it will result in occasional missed completion
  254. * interrupts even though the commands still appear on the CQ.
  255. * It's unclear why this happens but our best guess is that
  256. * there is a bug in the firmware triggered when a new command
  257. * is issued while we're inside the irq handler between the
  258. * NVMMU invalidation (and making the tag available again)
  259. * and the final CQ update.
  260. */
  261. spin_lock_irq(&anv->lock);
  262. writel(tag, q->sq_db);
  263. spin_unlock_irq(&anv->lock);
  264. }
  265. /*
  266. * From pci.c:
  267. * Will slightly overestimate the number of pages needed. This is OK
  268. * as it only leads to a small amount of wasted memory for the lifetime of
  269. * the I/O.
  270. */
  271. static inline size_t apple_nvme_iod_alloc_size(void)
  272. {
  273. const unsigned int nprps = DIV_ROUND_UP(
  274. NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE);
  275. const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
  276. const size_t alloc_size = sizeof(__le64 *) * npages +
  277. sizeof(struct scatterlist) * NVME_MAX_SEGS;
  278. return alloc_size;
  279. }
  280. static void **apple_nvme_iod_list(struct request *req)
  281. {
  282. struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
  283. return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
  284. }
  285. static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
  286. {
  287. const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
  288. struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
  289. dma_addr_t dma_addr = iod->first_dma;
  290. int i;
  291. for (i = 0; i < iod->npages; i++) {
  292. __le64 *prp_list = apple_nvme_iod_list(req)[i];
  293. dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
  294. dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
  295. dma_addr = next_dma_addr;
  296. }
  297. }
  298. static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
  299. {
  300. struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
  301. if (iod->dma_len) {
  302. dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
  303. rq_dma_dir(req));
  304. return;
  305. }
  306. WARN_ON_ONCE(!iod->nents);
  307. dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
  308. if (iod->npages == 0)
  309. dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
  310. iod->first_dma);
  311. else
  312. apple_nvme_free_prps(anv, req);
  313. mempool_free(iod->sg, anv->iod_mempool);
  314. }
  315. static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
  316. {
  317. int i;
  318. struct scatterlist *sg;
  319. for_each_sg(sgl, sg, nents, i) {
  320. dma_addr_t phys = sg_phys(sg);
  321. pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
  322. i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
  323. sg_dma_len(sg));
  324. }
  325. }
  326. static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
  327. struct request *req,
  328. struct nvme_rw_command *cmnd)
  329. {
  330. struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
  331. struct dma_pool *pool;
  332. int length = blk_rq_payload_bytes(req);
  333. struct scatterlist *sg = iod->sg;
  334. int dma_len = sg_dma_len(sg);
  335. u64 dma_addr = sg_dma_address(sg);
  336. int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
  337. __le64 *prp_list;
  338. void **list = apple_nvme_iod_list(req);
  339. dma_addr_t prp_dma;
  340. int nprps, i;
  341. length -= (NVME_CTRL_PAGE_SIZE - offset);
  342. if (length <= 0) {
  343. iod->first_dma = 0;
  344. goto done;
  345. }
  346. dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
  347. if (dma_len) {
  348. dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
  349. } else {
  350. sg = sg_next(sg);
  351. dma_addr = sg_dma_address(sg);
  352. dma_len = sg_dma_len(sg);
  353. }
  354. if (length <= NVME_CTRL_PAGE_SIZE) {
  355. iod->first_dma = dma_addr;
  356. goto done;
  357. }
  358. nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
  359. if (nprps <= (256 / 8)) {
  360. pool = anv->prp_small_pool;
  361. iod->npages = 0;
  362. } else {
  363. pool = anv->prp_page_pool;
  364. iod->npages = 1;
  365. }
  366. prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
  367. if (!prp_list) {
  368. iod->first_dma = dma_addr;
  369. iod->npages = -1;
  370. return BLK_STS_RESOURCE;
  371. }
  372. list[0] = prp_list;
  373. iod->first_dma = prp_dma;
  374. i = 0;
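/*
* Fill one PRP entry per NVME_CTRL_PAGE_SIZE chunk while walking the
* scatterlist. Whenever the current PRP page runs out of slots, allocate a
* new one, turn the last slot of the full page into a chain pointer to the
* new page and move the entry that used to live there to the start of the
* new page.
*/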
  375. for (;;) {
  376. if (i == NVME_CTRL_PAGE_SIZE >> 3) {
  377. __le64 *old_prp_list = prp_list;
  378. prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
  379. if (!prp_list)
  380. goto free_prps;
  381. list[iod->npages++] = prp_list;
  382. prp_list[0] = old_prp_list[i - 1];
  383. old_prp_list[i - 1] = cpu_to_le64(prp_dma);
  384. i = 1;
  385. }
  386. prp_list[i++] = cpu_to_le64(dma_addr);
  387. dma_len -= NVME_CTRL_PAGE_SIZE;
  388. dma_addr += NVME_CTRL_PAGE_SIZE;
  389. length -= NVME_CTRL_PAGE_SIZE;
  390. if (length <= 0)
  391. break;
  392. if (dma_len > 0)
  393. continue;
  394. if (unlikely(dma_len < 0))
  395. goto bad_sgl;
  396. sg = sg_next(sg);
  397. dma_addr = sg_dma_address(sg);
  398. dma_len = sg_dma_len(sg);
  399. }
  400. done:
  401. cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
  402. cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
  403. return BLK_STS_OK;
  404. free_prps:
  405. apple_nvme_free_prps(anv, req);
  406. return BLK_STS_RESOURCE;
  407. bad_sgl:
  408. WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
  409. "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req),
  410. iod->nents);
  411. return BLK_STS_IOERR;
  412. }
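/*
* Fast path for requests with a single physical segment spanning at most two
* controller pages: prp1/prp2 are used directly and no PRP list needs to be
* allocated.
*/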
  413. static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
  414. struct request *req,
  415. struct nvme_rw_command *cmnd,
  416. struct bio_vec *bv)
  417. {
  418. struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
  419. unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
  420. unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
  421. iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
  422. if (dma_mapping_error(anv->dev, iod->first_dma))
  423. return BLK_STS_RESOURCE;
  424. iod->dma_len = bv->bv_len;
  425. cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
  426. if (bv->bv_len > first_prp_len)
  427. cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
  428. return BLK_STS_OK;
  429. }
  430. static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
  431. struct request *req,
  432. struct nvme_command *cmnd)
  433. {
  434. struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
  435. blk_status_t ret = BLK_STS_RESOURCE;
  436. int nr_mapped;
  437. if (blk_rq_nr_phys_segments(req) == 1) {
  438. struct bio_vec bv = req_bvec(req);
  439. if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
  440. return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw,
  441. &bv);
  442. }
  443. iod->dma_len = 0;
  444. iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
  445. if (!iod->sg)
  446. return BLK_STS_RESOURCE;
  447. sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
  448. iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
  449. if (!iod->nents)
  450. goto out_free_sg;
  451. nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
  452. rq_dma_dir(req), DMA_ATTR_NO_WARN);
  453. if (!nr_mapped)
  454. goto out_free_sg;
  455. ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
  456. if (ret != BLK_STS_OK)
  457. goto out_unmap_sg;
  458. return BLK_STS_OK;
  459. out_unmap_sg:
  460. dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
  461. out_free_sg:
  462. mempool_free(iod->sg, anv->iod_mempool);
  463. return ret;
  464. }
  465. static __always_inline void apple_nvme_unmap_rq(struct request *req)
  466. {
  467. struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
  468. struct apple_nvme *anv = queue_to_apple_nvme(iod->q);
  469. if (blk_rq_nr_phys_segments(req))
  470. apple_nvme_unmap_data(anv, req);
  471. }
  472. static void apple_nvme_complete_rq(struct request *req)
  473. {
  474. apple_nvme_unmap_rq(req);
  475. nvme_complete_rq(req);
  476. }
  477. static void apple_nvme_complete_batch(struct io_comp_batch *iob)
  478. {
  479. nvme_complete_batch(iob, apple_nvme_unmap_rq);
  480. }
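/*
* A CQ entry is new if its phase bit matches cq_phase; the controller
* inverts the phase it writes every time the completion queue wraps around,
* so stale entries from the previous pass are ignored.
*/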
  481. static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
  482. {
  483. struct nvme_completion *hcqe = &q->cqes[q->cq_head];
  484. return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
  485. }
  486. static inline struct blk_mq_tags *
  487. apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
  488. {
  489. if (q->is_adminq)
  490. return anv->admin_tagset.tags[0];
  491. else
  492. return anv->tagset.tags[0];
  493. }
  494. static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
  495. struct io_comp_batch *iob, u16 idx)
  496. {
  497. struct apple_nvme *anv = queue_to_apple_nvme(q);
  498. struct nvme_completion *cqe = &q->cqes[idx];
  499. __u16 command_id = READ_ONCE(cqe->command_id);
  500. struct request *req;
  501. apple_nvmmu_inval(q, command_id);
  502. req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
  503. if (unlikely(!req)) {
  504. dev_warn(anv->dev, "invalid id %d completed", command_id);
  505. return;
  506. }
  507. if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
  508. !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
  509. apple_nvme_complete_batch))
  510. apple_nvme_complete_rq(req);
  511. }
  512. static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
  513. {
  514. u32 tmp = q->cq_head + 1;
  515. if (tmp == apple_nvme_queue_depth(q)) {
  516. q->cq_head = 0;
  517. q->cq_phase ^= 1;
  518. } else {
  519. q->cq_head = tmp;
  520. }
  521. }
  522. static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
  523. struct io_comp_batch *iob)
  524. {
  525. bool found = false;
  526. while (apple_nvme_cqe_pending(q)) {
  527. found = true;
  528. /*
  529. * load-load control dependency between phase and the rest of
  530. * the cqe requires a full read memory barrier
  531. */
  532. dma_rmb();
  533. apple_nvme_handle_cqe(q, iob, q->cq_head);
  534. apple_nvme_update_cq_head(q);
  535. }
  536. if (found)
  537. writel(q->cq_head, q->cq_db);
  538. return found;
  539. }
  540. static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
  541. {
  542. bool found;
  543. DEFINE_IO_COMP_BATCH(iob);
  544. if (!READ_ONCE(q->enabled) && !force)
  545. return false;
  546. found = apple_nvme_poll_cq(q, &iob);
  547. if (!rq_list_empty(iob.req_list))
  548. apple_nvme_complete_batch(&iob);
  549. return found;
  550. }
  551. static irqreturn_t apple_nvme_irq(int irq, void *data)
  552. {
  553. struct apple_nvme *anv = data;
  554. bool handled = false;
  555. unsigned long flags;
  556. spin_lock_irqsave(&anv->lock, flags);
  557. if (apple_nvme_handle_cq(&anv->ioq, false))
  558. handled = true;
  559. if (apple_nvme_handle_cq(&anv->adminq, false))
  560. handled = true;
  561. spin_unlock_irqrestore(&anv->lock, flags);
  562. if (handled)
  563. return IRQ_HANDLED;
  564. return IRQ_NONE;
  565. }
  566. static int apple_nvme_create_cq(struct apple_nvme *anv)
  567. {
  568. struct nvme_command c = {};
  569. /*
  570. * Note: we (ab)use the fact that the prp fields survive if no data
  571. * is attached to the request.
  572. */
  573. c.create_cq.opcode = nvme_admin_create_cq;
  574. c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
  575. c.create_cq.cqid = cpu_to_le16(1);
  576. c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
  577. c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
  578. c.create_cq.irq_vector = cpu_to_le16(0);
  579. return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
  580. }
  581. static int apple_nvme_remove_cq(struct apple_nvme *anv)
  582. {
  583. struct nvme_command c = {};
  584. c.delete_queue.opcode = nvme_admin_delete_cq;
  585. c.delete_queue.qid = cpu_to_le16(1);
  586. return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
  587. }
  588. static int apple_nvme_create_sq(struct apple_nvme *anv)
  589. {
  590. struct nvme_command c = {};
  591. /*
  592. * Note: we (ab)use the fact that the prp fields survive if no data
  593. * is attached to the request.
  594. */
  595. c.create_sq.opcode = nvme_admin_create_sq;
  596. c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
  597. c.create_sq.sqid = cpu_to_le16(1);
  598. c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
  599. c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
  600. c.create_sq.cqid = cpu_to_le16(1);
  601. return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
  602. }
  603. static int apple_nvme_remove_sq(struct apple_nvme *anv)
  604. {
  605. struct nvme_command c = {};
  606. c.delete_queue.opcode = nvme_admin_delete_sq;
  607. c.delete_queue.qid = cpu_to_le16(1);
  608. return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
  609. }
  610. static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
  611. const struct blk_mq_queue_data *bd)
  612. {
  613. struct nvme_ns *ns = hctx->queue->queuedata;
  614. struct apple_nvme_queue *q = hctx->driver_data;
  615. struct apple_nvme *anv = queue_to_apple_nvme(q);
  616. struct request *req = bd->rq;
  617. struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
  618. struct nvme_command *cmnd = &iod->cmd;
  619. blk_status_t ret;
  620. iod->npages = -1;
  621. iod->nents = 0;
  622. /*
  623. * We should not need to do this, but we're still using this to
  624. * ensure we can drain requests on a dying queue.
  625. */
  626. if (unlikely(!READ_ONCE(q->enabled)))
  627. return BLK_STS_IOERR;
  628. if (!nvme_check_ready(&anv->ctrl, req, true))
  629. return nvme_fail_nonready_command(&anv->ctrl, req);
  630. ret = nvme_setup_cmd(ns, req);
  631. if (ret)
  632. return ret;
  633. if (blk_rq_nr_phys_segments(req)) {
  634. ret = apple_nvme_map_data(anv, req, cmnd);
  635. if (ret)
  636. goto out_free_cmd;
  637. }
  638. blk_mq_start_request(req);
  639. apple_nvme_submit_cmd(q, cmnd);
  640. return BLK_STS_OK;
  641. out_free_cmd:
  642. nvme_cleanup_cmd(req);
  643. return ret;
  644. }
  645. static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
  646. unsigned int hctx_idx)
  647. {
  648. hctx->driver_data = data;
  649. return 0;
  650. }
  651. static int apple_nvme_init_request(struct blk_mq_tag_set *set,
  652. struct request *req, unsigned int hctx_idx,
  653. unsigned int numa_node)
  654. {
  655. struct apple_nvme_queue *q = set->driver_data;
  656. struct apple_nvme *anv = queue_to_apple_nvme(q);
  657. struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
  658. struct nvme_request *nreq = nvme_req(req);
  659. iod->q = q;
  660. nreq->ctrl = &anv->ctrl;
  661. nreq->cmd = &iod->cmd;
  662. return 0;
  663. }
  664. static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
  665. {
  666. u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
  667. bool dead = false, freeze = false;
  668. unsigned long flags;
  669. if (apple_rtkit_is_crashed(anv->rtk))
  670. dead = true;
  671. if (!(csts & NVME_CSTS_RDY))
  672. dead = true;
  673. if (csts & NVME_CSTS_CFS)
  674. dead = true;
  675. if (anv->ctrl.state == NVME_CTRL_LIVE ||
  676. anv->ctrl.state == NVME_CTRL_RESETTING) {
  677. freeze = true;
  678. nvme_start_freeze(&anv->ctrl);
  679. }
  680. /*
  681. * Give the controller a chance to complete all entered requests if
  682. * doing a safe shutdown.
  683. */
  684. if (!dead && shutdown && freeze)
  685. nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);
  686. nvme_stop_queues(&anv->ctrl);
  687. if (!dead) {
  688. if (READ_ONCE(anv->ioq.enabled)) {
  689. apple_nvme_remove_sq(anv);
  690. apple_nvme_remove_cq(anv);
  691. }
  692. if (shutdown)
  693. nvme_shutdown_ctrl(&anv->ctrl);
  694. nvme_disable_ctrl(&anv->ctrl);
  695. }
  696. WRITE_ONCE(anv->ioq.enabled, false);
  697. WRITE_ONCE(anv->adminq.enabled, false);
  698. mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
  699. nvme_stop_admin_queue(&anv->ctrl);
  700. /* last chance to complete any requests before nvme_cancel_request */
  701. spin_lock_irqsave(&anv->lock, flags);
  702. apple_nvme_handle_cq(&anv->ioq, true);
  703. apple_nvme_handle_cq(&anv->adminq, true);
  704. spin_unlock_irqrestore(&anv->lock, flags);
  705. nvme_cancel_tagset(&anv->ctrl);
  706. nvme_cancel_admin_tagset(&anv->ctrl);
  707. /*
  708. * The driver will not be starting up queues again if shutting down so
  709. * must flush all entered requests to their failed completion to avoid
  710. * deadlocking blk-mq hot-cpu notifier.
  711. */
  712. if (shutdown) {
  713. nvme_start_queues(&anv->ctrl);
  714. nvme_start_admin_queue(&anv->ctrl);
  715. }
  716. }
  717. static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
  718. {
  719. struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
  720. struct apple_nvme_queue *q = iod->q;
  721. struct apple_nvme *anv = queue_to_apple_nvme(q);
  722. unsigned long flags;
  723. u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
  724. if (anv->ctrl.state != NVME_CTRL_LIVE) {
  725. /*
  726. * From rdma.c:
  727. * If we are resetting, connecting or deleting we should
  728. * complete immediately because we may block controller
  729. * teardown or setup sequence
  730. * - ctrl disable/shutdown fabrics requests
  731. * - connect requests
  732. * - initialization admin requests
  733. * - I/O requests that entered after unquiescing and
  734. * the controller stopped responding
  735. *
  736. * All other requests should be cancelled by the error
  737. * recovery work, so it's fine that we fail it here.
  738. */
  739. dev_warn(anv->dev,
  740. "I/O %d(aq:%d) timeout while not in live state\n",
  741. req->tag, q->is_adminq);
  742. if (blk_mq_request_started(req) &&
  743. !blk_mq_request_completed(req)) {
  744. nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
  745. nvme_req(req)->flags |= NVME_REQ_CANCELLED;
  746. blk_mq_complete_request(req);
  747. }
  748. return BLK_EH_DONE;
  749. }
  750. /* if we're still alive, check whether we just missed an interrupt */
  751. if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) {
  752. spin_lock_irqsave(&anv->lock, flags);
  753. apple_nvme_handle_cq(q, false);
  754. spin_unlock_irqrestore(&anv->lock, flags);
  755. if (blk_mq_request_completed(req)) {
  756. dev_warn(anv->dev,
  757. "I/O %d(aq:%d) timeout: completion polled\n",
  758. req->tag, q->is_adminq);
  759. return BLK_EH_DONE;
  760. }
  761. }
  762. /*
  763. * Aborting commands isn't supported, which leaves a full reset as our
  764. * only option here.
  765. */
  766. dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n",
  767. req->tag, q->is_adminq);
  768. nvme_req(req)->flags |= NVME_REQ_CANCELLED;
  769. apple_nvme_disable(anv, false);
  770. nvme_reset_ctrl(&anv->ctrl);
  771. return BLK_EH_DONE;
  772. }
  773. static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
  774. struct io_comp_batch *iob)
  775. {
  776. struct apple_nvme_queue *q = hctx->driver_data;
  777. struct apple_nvme *anv = queue_to_apple_nvme(q);
  778. bool found;
  779. unsigned long flags;
  780. spin_lock_irqsave(&anv->lock, flags);
  781. found = apple_nvme_poll_cq(q, iob);
  782. spin_unlock_irqrestore(&anv->lock, flags);
  783. return found;
  784. }
  785. static const struct blk_mq_ops apple_nvme_mq_admin_ops = {
  786. .queue_rq = apple_nvme_queue_rq,
  787. .complete = apple_nvme_complete_rq,
  788. .init_hctx = apple_nvme_init_hctx,
  789. .init_request = apple_nvme_init_request,
  790. .timeout = apple_nvme_timeout,
  791. };
  792. static const struct blk_mq_ops apple_nvme_mq_ops = {
  793. .queue_rq = apple_nvme_queue_rq,
  794. .complete = apple_nvme_complete_rq,
  795. .init_hctx = apple_nvme_init_hctx,
  796. .init_request = apple_nvme_init_request,
  797. .timeout = apple_nvme_timeout,
  798. .poll = apple_nvme_poll,
  799. };
  800. static void apple_nvme_init_queue(struct apple_nvme_queue *q)
  801. {
  802. unsigned int depth = apple_nvme_queue_depth(q);
  803. q->cq_head = 0;
  804. q->cq_phase = 1;
  805. memset(q->tcbs, 0,
  806. APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
  807. memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
  808. WRITE_ONCE(q->enabled, true);
  809. wmb(); /* ensure the first interrupt sees the initialization */
  810. }
  811. static void apple_nvme_reset_work(struct work_struct *work)
  812. {
  813. unsigned int nr_io_queues = 1;
  814. int ret;
  815. u32 boot_status, aqa;
  816. struct apple_nvme *anv =
  817. container_of(work, struct apple_nvme, ctrl.reset_work);
  818. if (anv->ctrl.state != NVME_CTRL_RESETTING) {
  819. dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
  820. anv->ctrl.state);
  821. ret = -ENODEV;
  822. goto out;
  823. }
  824. /* there's unfortunately no known way to recover if RTKit crashed :( */
  825. if (apple_rtkit_is_crashed(anv->rtk)) {
  826. dev_err(anv->dev,
  827. "RTKit has crashed without any way to recover.");
  828. ret = -EIO;
  829. goto out;
  830. }
  831. /* RTKit must be shut down cleanly for the (soft)-reset to work */
  832. if (apple_rtkit_is_running(anv->rtk)) {
  833. /* reset the controller if it is enabled */
  834. if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
  835. apple_nvme_disable(anv, false);
  836. dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
  837. ret = apple_rtkit_shutdown(anv->rtk);
  838. if (ret)
  839. goto out;
  840. }
  841. writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
  842. ret = reset_control_assert(anv->reset);
  843. if (ret)
  844. goto out;
  845. ret = apple_rtkit_reinit(anv->rtk);
  846. if (ret)
  847. goto out;
  848. ret = reset_control_deassert(anv->reset);
  849. if (ret)
  850. goto out;
  851. writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
  852. anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
  853. ret = apple_rtkit_boot(anv->rtk);
  854. if (ret) {
  855. dev_err(anv->dev, "ANS did not boot");
  856. goto out;
  857. }
  858. ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
  859. boot_status,
  860. boot_status == APPLE_ANS_BOOT_STATUS_OK,
  861. USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
  862. if (ret) {
  863. dev_err(anv->dev, "ANS did not initialize");
  864. goto out;
  865. }
  866. dev_dbg(anv->dev, "ANS booted successfully.");
  867. /*
  868. * Limit the max command size to prevent iod->sg allocations going
  869. * over a single page.
  870. */
  871. anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
  872. dma_max_mapping_size(anv->dev) >> 9);
  873. anv->ctrl.max_segments = NVME_MAX_SEGS;
  874. dma_set_max_seg_size(anv->dev, 0xffffffff);
  875. /*
  876. * Enable NVMMU and linear submission queues.
  877. * While we could keep those disabled and pretend this is a slightly
  878. * more common NVMe controller, we'd still need some quirks (e.g.
  879. * sq entries will be 128 bytes) and Apple might drop support for
  880. * that mode in the future.
  881. */
  882. writel(APPLE_ANS_LINEAR_SQ_EN,
  883. anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
  884. /* Allow as many pending commands as possible for both queues */
  885. writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
  886. anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);
  887. /* Setup the NVMMU for the maximum admin and IO queue depth */
  888. writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
  889. anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
  890. /*
  891. * This is probably a chicken bit: without it all commands where any PRP
  892. * is set to zero (including those that don't use that field) fail and
  893. * the co-processor complains about "completed with err BAD_CMD-" or
  894. * a "NULL_PRP_PTR_ERR" in the syslog
  895. */
  896. writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
  897. ~APPLE_ANS_PRP_NULL_CHECK,
  898. anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
  899. /* Setup the admin queue */
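/*
* AQA takes the 0's based admin submission queue size in its low 16 bits and
* the 0's based admin completion queue size in its high 16 bits.
*/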
  900. aqa = APPLE_NVME_AQ_DEPTH - 1;
  901. aqa |= aqa << 16;
  902. writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
  903. writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
  904. writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);
  905. /* Setup NVMMU for both queues */
  906. writeq(anv->adminq.tcb_dma_addr,
  907. anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
  908. writeq(anv->ioq.tcb_dma_addr,
  909. anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
  910. anv->ctrl.sqsize =
  911. APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
  912. anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);
  913. dev_dbg(anv->dev, "Enabling controller now");
  914. ret = nvme_enable_ctrl(&anv->ctrl);
  915. if (ret)
  916. goto out;
  917. dev_dbg(anv->dev, "Starting admin queue");
  918. apple_nvme_init_queue(&anv->adminq);
  919. nvme_start_admin_queue(&anv->ctrl);
  920. if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
  921. dev_warn(anv->ctrl.device,
  922. "failed to mark controller CONNECTING\n");
  923. ret = -ENODEV;
  924. goto out;
  925. }
  926. ret = nvme_init_ctrl_finish(&anv->ctrl);
  927. if (ret)
  928. goto out;
  929. dev_dbg(anv->dev, "Creating IOCQ");
  930. ret = apple_nvme_create_cq(anv);
  931. if (ret)
  932. goto out;
  933. dev_dbg(anv->dev, "Creating IOSQ");
  934. ret = apple_nvme_create_sq(anv);
  935. if (ret)
  936. goto out_remove_cq;
  937. apple_nvme_init_queue(&anv->ioq);
  938. nr_io_queues = 1;
  939. ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
  940. if (ret)
  941. goto out_remove_sq;
  942. if (nr_io_queues != 1) {
  943. ret = -ENXIO;
  944. goto out_remove_sq;
  945. }
  946. anv->ctrl.queue_count = nr_io_queues + 1;
  947. nvme_start_queues(&anv->ctrl);
  948. nvme_wait_freeze(&anv->ctrl);
  949. blk_mq_update_nr_hw_queues(&anv->tagset, 1);
  950. nvme_unfreeze(&anv->ctrl);
  951. if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) {
  952. dev_warn(anv->ctrl.device,
  953. "failed to mark controller live state\n");
  954. ret = -ENODEV;
  955. goto out_remove_sq;
  956. }
  957. nvme_start_ctrl(&anv->ctrl);
  958. dev_dbg(anv->dev, "ANS boot and NVMe init completed.");
  959. return;
  960. out_remove_sq:
  961. apple_nvme_remove_sq(anv);
  962. out_remove_cq:
  963. apple_nvme_remove_cq(anv);
  964. out:
  965. dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret);
  966. nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
  967. nvme_get_ctrl(&anv->ctrl);
  968. apple_nvme_disable(anv, false);
  969. nvme_kill_queues(&anv->ctrl);
  970. if (!queue_work(nvme_wq, &anv->remove_work))
  971. nvme_put_ctrl(&anv->ctrl);
  972. }
  973. static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
  974. {
  975. struct apple_nvme *anv =
  976. container_of(work, struct apple_nvme, remove_work);
  977. nvme_put_ctrl(&anv->ctrl);
  978. device_release_driver(anv->dev);
  979. }
  980. static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
  981. {
  982. *val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
  983. return 0;
  984. }
  985. static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
  986. {
  987. writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
  988. return 0;
  989. }
  990. static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
  991. {
  992. *val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
  993. return 0;
  994. }
  995. static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
  996. {
  997. struct device *dev = ctrl_to_apple_nvme(ctrl)->dev;
  998. return snprintf(buf, size, "%s\n", dev_name(dev));
  999. }
  1000. static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
  1001. {
  1002. struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);
  1003. if (anv->ctrl.admin_q)
  1004. blk_put_queue(anv->ctrl.admin_q);
  1005. put_device(anv->dev);
  1006. }
  1007. static const struct nvme_ctrl_ops nvme_ctrl_ops = {
  1008. .name = "apple-nvme",
  1009. .module = THIS_MODULE,
  1010. .flags = 0,
  1011. .reg_read32 = apple_nvme_reg_read32,
  1012. .reg_write32 = apple_nvme_reg_write32,
  1013. .reg_read64 = apple_nvme_reg_read64,
  1014. .free_ctrl = apple_nvme_free_ctrl,
  1015. .get_address = apple_nvme_get_address,
  1016. };
  1017. static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
  1018. {
  1019. struct apple_nvme *anv = data;
  1020. flush_work(&anv->ctrl.reset_work);
  1021. flush_work(&anv->ctrl.scan_work);
  1022. nvme_put_ctrl(&anv->ctrl);
  1023. }
  1024. static void devm_apple_nvme_put_tag_set(void *data)
  1025. {
  1026. blk_mq_free_tag_set(data);
  1027. }
  1028. static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
  1029. {
  1030. int ret;
  1031. anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
  1032. anv->admin_tagset.nr_hw_queues = 1;
  1033. anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
  1034. anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
  1035. anv->admin_tagset.numa_node = NUMA_NO_NODE;
  1036. anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
  1037. anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
  1038. anv->admin_tagset.driver_data = &anv->adminq;
  1039. ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
  1040. if (ret)
  1041. return ret;
  1042. ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
  1043. &anv->admin_tagset);
  1044. if (ret)
  1045. return ret;
  1046. anv->tagset.ops = &apple_nvme_mq_ops;
  1047. anv->tagset.nr_hw_queues = 1;
  1048. anv->tagset.nr_maps = 1;
  1049. /*
  1050. * Tags are used as an index to the NVMMU and must be unique across
  1051. * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
  1052. * must be marked as reserved in the IO queue.
  1053. */
  1054. anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
  1055. anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
  1056. anv->tagset.timeout = NVME_IO_TIMEOUT;
  1057. anv->tagset.numa_node = NUMA_NO_NODE;
  1058. anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
  1059. anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
  1060. anv->tagset.driver_data = &anv->ioq;
  1061. ret = blk_mq_alloc_tag_set(&anv->tagset);
  1062. if (ret)
  1063. return ret;
  1064. ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
  1065. &anv->tagset);
  1066. if (ret)
  1067. return ret;
  1068. anv->ctrl.admin_tagset = &anv->admin_tagset;
  1069. anv->ctrl.tagset = &anv->tagset;
  1070. return 0;
  1071. }
  1072. static int apple_nvme_queue_alloc(struct apple_nvme *anv,
  1073. struct apple_nvme_queue *q)
  1074. {
  1075. unsigned int depth = apple_nvme_queue_depth(q);
  1076. q->cqes = dmam_alloc_coherent(anv->dev,
  1077. depth * sizeof(struct nvme_completion),
  1078. &q->cq_dma_addr, GFP_KERNEL);
  1079. if (!q->cqes)
  1080. return -ENOMEM;
  1081. q->sqes = dmam_alloc_coherent(anv->dev,
  1082. depth * sizeof(struct nvme_command),
  1083. &q->sq_dma_addr, GFP_KERNEL);
  1084. if (!q->sqes)
  1085. return -ENOMEM;
  1086. /*
  1087. * We need the maximum queue depth here because the NVMMU only has a
  1088. * single depth configuration shared between both queues.
  1089. */
  1090. q->tcbs = dmam_alloc_coherent(anv->dev,
  1091. APPLE_ANS_MAX_QUEUE_DEPTH *
  1092. sizeof(struct apple_nvmmu_tcb),
  1093. &q->tcb_dma_addr, GFP_KERNEL);
  1094. if (!q->tcbs)
  1095. return -ENOMEM;
  1096. /*
  1097. * Initialize the phase to make sure the allocated and empty memory
  1098. * doesn't look like a full CQ already.
  1099. */
  1100. q->cq_phase = 1;
  1101. return 0;
  1102. }
  1103. static void apple_nvme_detach_genpd(struct apple_nvme *anv)
  1104. {
  1105. int i;
  1106. if (anv->pd_count <= 1)
  1107. return;
  1108. for (i = anv->pd_count - 1; i >= 0; i--) {
  1109. if (anv->pd_link[i])
  1110. device_link_del(anv->pd_link[i]);
  1111. if (!IS_ERR_OR_NULL(anv->pd_dev[i]))
  1112. dev_pm_domain_detach(anv->pd_dev[i], true);
  1113. }
  1114. }
  1115. static int apple_nvme_attach_genpd(struct apple_nvme *anv)
  1116. {
  1117. struct device *dev = anv->dev;
  1118. int i;
  1119. anv->pd_count = of_count_phandle_with_args(
  1120. dev->of_node, "power-domains", "#power-domain-cells");
  1121. if (anv->pd_count <= 1)
  1122. return 0;
  1123. anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
  1124. GFP_KERNEL);
  1125. if (!anv->pd_dev)
  1126. return -ENOMEM;
  1127. anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
  1128. GFP_KERNEL);
  1129. if (!anv->pd_link)
  1130. return -ENOMEM;
  1131. for (i = 0; i < anv->pd_count; i++) {
  1132. anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
  1133. if (IS_ERR(anv->pd_dev[i])) {
  1134. apple_nvme_detach_genpd(anv);
  1135. return PTR_ERR(anv->pd_dev[i]);
  1136. }
  1137. anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
  1138. DL_FLAG_STATELESS |
  1139. DL_FLAG_PM_RUNTIME |
  1140. DL_FLAG_RPM_ACTIVE);
  1141. if (!anv->pd_link[i]) {
  1142. apple_nvme_detach_genpd(anv);
  1143. return -EINVAL;
  1144. }
  1145. }
  1146. return 0;
  1147. }
  1148. static void devm_apple_nvme_mempool_destroy(void *data)
  1149. {
  1150. mempool_destroy(data);
  1151. }
  1152. static int apple_nvme_probe(struct platform_device *pdev)
  1153. {
  1154. struct device *dev = &pdev->dev;
  1155. struct apple_nvme *anv;
  1156. int ret;
  1157. anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
  1158. if (!anv)
  1159. return -ENOMEM;
  1160. anv->dev = get_device(dev);
  1161. anv->adminq.is_adminq = true;
  1162. platform_set_drvdata(pdev, anv);
  1163. ret = apple_nvme_attach_genpd(anv);
  1164. if (ret < 0) {
  1165. dev_err_probe(dev, ret, "Failed to attach power domains");
  1166. goto put_dev;
  1167. }
  1168. if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
  1169. ret = -ENXIO;
  1170. goto put_dev;
  1171. }
  1172. anv->irq = platform_get_irq(pdev, 0);
  1173. if (anv->irq < 0) {
  1174. ret = anv->irq;
  1175. goto put_dev;
  1176. }
  1177. if (!anv->irq) {
  1178. ret = -ENXIO;
  1179. goto put_dev;
  1180. }
  1181. anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans");
  1182. if (IS_ERR(anv->mmio_coproc)) {
  1183. ret = PTR_ERR(anv->mmio_coproc);
  1184. goto put_dev;
  1185. }
  1186. anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme");
  1187. if (IS_ERR(anv->mmio_nvme)) {
  1188. ret = PTR_ERR(anv->mmio_nvme);
  1189. goto put_dev;
  1190. }
  1191. anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
  1192. anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
  1193. anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
  1194. anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
  1195. anv->sart = devm_apple_sart_get(dev);
  1196. if (IS_ERR(anv->sart)) {
  1197. ret = dev_err_probe(dev, PTR_ERR(anv->sart),
  1198. "Failed to initialize SART");
  1199. goto put_dev;
  1200. }
  1201. anv->reset = devm_reset_control_array_get_exclusive(anv->dev);
  1202. if (IS_ERR(anv->reset)) {
  1203. ret = dev_err_probe(dev, PTR_ERR(anv->reset),
  1204. "Failed to get reset control");
  1205. goto put_dev;
  1206. }
  1207. INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work);
  1208. INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work);
  1209. spin_lock_init(&anv->lock);
  1210. ret = apple_nvme_queue_alloc(anv, &anv->adminq);
  1211. if (ret)
  1212. goto put_dev;
  1213. ret = apple_nvme_queue_alloc(anv, &anv->ioq);
  1214. if (ret)
  1215. goto put_dev;
  1216. anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev,
  1217. NVME_CTRL_PAGE_SIZE,
  1218. NVME_CTRL_PAGE_SIZE, 0);
  1219. if (!anv->prp_page_pool) {
  1220. ret = -ENOMEM;
  1221. goto put_dev;
  1222. }
  1223. anv->prp_small_pool =
  1224. dmam_pool_create("prp list 256", anv->dev, 256, 256, 0);
  1225. if (!anv->prp_small_pool) {
  1226. ret = -ENOMEM;
  1227. goto put_dev;
  1228. }
  1229. WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
  1230. anv->iod_mempool =
  1231. mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
  1232. if (!anv->iod_mempool) {
  1233. ret = -ENOMEM;
  1234. goto put_dev;
  1235. }
  1236. ret = devm_add_action_or_reset(anv->dev,
  1237. devm_apple_nvme_mempool_destroy, anv->iod_mempool);
  1238. if (ret)
  1239. goto put_dev;
  1240. ret = apple_nvme_alloc_tagsets(anv);
  1241. if (ret)
  1242. goto put_dev;
  1243. ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0,
  1244. "nvme-apple", anv);
  1245. if (ret) {
  1246. dev_err_probe(dev, ret, "Failed to request IRQ");
  1247. goto put_dev;
  1248. }
  1249. anv->rtk =
  1250. devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops);
  1251. if (IS_ERR(anv->rtk)) {
  1252. ret = dev_err_probe(dev, PTR_ERR(anv->rtk),
  1253. "Failed to initialize RTKit");
  1254. goto put_dev;
  1255. }
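/*
* NVME_QUIRK_SKIP_CID_GEN keeps command_id identical to the blk-mq tag. The
* tag doubles as the NVMMU/TCB index on this controller, so the generation
* counter that the core would otherwise fold into the upper command_id bits
* must not be used.
*/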
  1256. ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
  1257. NVME_QUIRK_SKIP_CID_GEN);
  1258. if (ret) {
  1259. dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
  1260. goto put_dev;
  1261. }
  1262. anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
  1263. if (IS_ERR(anv->ctrl.admin_q)) {
  1264. ret = -ENOMEM;
  1265. goto put_dev;
  1266. }
  1267. if (!blk_get_queue(anv->ctrl.admin_q)) {
  1268. nvme_start_admin_queue(&anv->ctrl);
  1269. blk_mq_destroy_queue(anv->ctrl.admin_q);
  1270. blk_put_queue(anv->ctrl.admin_q);
  1271. anv->ctrl.admin_q = NULL;
  1272. ret = -ENODEV;
  1273. goto put_dev;
  1274. }
  1275. nvme_reset_ctrl(&anv->ctrl);
  1276. async_schedule(apple_nvme_async_probe, anv);
  1277. return 0;
  1278. put_dev:
  1279. put_device(anv->dev);
  1280. return ret;
  1281. }
  1282. static int apple_nvme_remove(struct platform_device *pdev)
  1283. {
  1284. struct apple_nvme *anv = platform_get_drvdata(pdev);
  1285. nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
  1286. flush_work(&anv->ctrl.reset_work);
  1287. nvme_stop_ctrl(&anv->ctrl);
  1288. nvme_remove_namespaces(&anv->ctrl);
  1289. apple_nvme_disable(anv, true);
  1290. nvme_uninit_ctrl(&anv->ctrl);
  1291. if (apple_rtkit_is_running(anv->rtk))
  1292. apple_rtkit_shutdown(anv->rtk);
  1293. apple_nvme_detach_genpd(anv);
  1294. return 0;
  1295. }
  1296. static void apple_nvme_shutdown(struct platform_device *pdev)
  1297. {
  1298. struct apple_nvme *anv = platform_get_drvdata(pdev);
  1299. apple_nvme_disable(anv, true);
  1300. if (apple_rtkit_is_running(anv->rtk))
  1301. apple_rtkit_shutdown(anv->rtk);
  1302. }
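/*
* The ANS coprocessor is shut down on suspend, so resume goes through a full
* controller reset: nvme_reset_ctrl() schedules apple_nvme_reset_work() which
* reboots RTKit and redoes the entire NVMe initialization.
*/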
  1303. static int apple_nvme_resume(struct device *dev)
  1304. {
  1305. struct apple_nvme *anv = dev_get_drvdata(dev);
  1306. return nvme_reset_ctrl(&anv->ctrl);
  1307. }
  1308. static int apple_nvme_suspend(struct device *dev)
  1309. {
  1310. struct apple_nvme *anv = dev_get_drvdata(dev);
  1311. int ret = 0;
  1312. apple_nvme_disable(anv, true);
  1313. if (apple_rtkit_is_running(anv->rtk))
  1314. ret = apple_rtkit_shutdown(anv->rtk);
  1315. writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
  1316. return ret;
  1317. }
  1318. static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
  1319. apple_nvme_resume);
  1320. static const struct of_device_id apple_nvme_of_match[] = {
  1321. { .compatible = "apple,nvme-ans2" },
  1322. {},
  1323. };
  1324. MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
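/*
* Minimal, purely illustrative sketch of a matching device tree node (not a
* complete binding). The compatible string and the "nvme"/"ans" reg-names
* come from this driver; the register ranges, interrupt and the &ps_ans2
* phandle are made-up placeholders, not taken from a real device tree.
*
* nvme@... {
*     compatible = "apple,nvme-ans2";
*     reg = <... ...>, <... ...>;
*     reg-names = "nvme", "ans";
*     interrupts = <...>;
*     power-domains = <&ps_ans2>;
*     resets = <&ps_ans2>;
* };
*/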
  1325. static struct platform_driver apple_nvme_driver = {
  1326. .driver = {
  1327. .name = "nvme-apple",
  1328. .of_match_table = apple_nvme_of_match,
  1329. .pm = pm_sleep_ptr(&apple_nvme_pm_ops),
  1330. },
  1331. .probe = apple_nvme_probe,
  1332. .remove = apple_nvme_remove,
  1333. .shutdown = apple_nvme_shutdown,
  1334. };
  1335. module_platform_driver(apple_nvme_driver);
  1336. MODULE_AUTHOR("Sven Peter <[email protected]>");
  1337. MODULE_LICENSE("GPL");