/* ssg-iosched.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * SamSung Generic I/O scheduler
  4. * for the blk-mq scheduling framework
  5. *
  6. * Copyright (C) 2021 Jisoo Oh <[email protected]>
  7. * Copyright (C) 2021 Manjong Lee <[email protected]>
  8. * Copyright (C) 2021 Changheun Lee <[email protected]>
  9. */
  10. #include <linux/kernel.h>
  11. #include <linux/fs.h>
  12. #include <linux/blkdev.h>
  13. #include <linux/blk-mq.h>
  14. #include <linux/bio.h>
  15. #include <linux/module.h>
  16. #include <linux/slab.h>
  17. #include <linux/init.h>
  18. #include <linux/compiler.h>
  19. #include <linux/rbtree.h>
  20. #include <linux/sbitmap.h>
  21. #include <trace/events/block.h>
  22. #include "blk.h"
  23. #include "elevator.h"
  24. #include "blk-mq.h"
  25. #include "blk-mq-debugfs.h"
  26. #include "blk-mq-tag.h"
  27. #include "blk-mq-sched.h"
  28. #include "ssg.h"
  29. #include "blk-sec.h"
/* Hard cap on concurrently allocated async write requests. */
#define MAX_ASYNC_WRITE_RQS	8

/* Default tunables; read_expire/write_expire are in jiffies. */
static const int read_expire = HZ / 2;		/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;		/* ditto for writes, these limits are SOFT! */
static const int max_write_starvation = 2;	/* max times reads can starve a write */
static const int congestion_threshold = 90;	/* percentage of congestion threshold */
static const int max_tgroup_io_ratio = 50;	/* maximum service ratio for each thread group */
static const int max_async_write_ratio = 25;	/* maximum service ratio for async write */
/* Return the sector-sorted rbtree (READ or WRITE) that @rq belongs to. */
static inline struct rb_root *ssg_rb_root(struct ssg_data *ssg, struct request *rq)
{
	return &ssg->sort_list[rq_data_dir(rq)];
}
  41. /*
  42. * get the request after `rq' in sector-sorted order
  43. */
  44. static inline struct request *ssg_latter_request(struct request *rq)
  45. {
  46. struct rb_node *node = rb_next(&rq->rb_node);
  47. if (node)
  48. return rb_entry_rq(node);
  49. return NULL;
  50. }
  51. static void ssg_add_rq_rb(struct ssg_data *ssg, struct request *rq)
  52. {
  53. struct rb_root *root = ssg_rb_root(ssg, rq);
  54. elv_rb_add(root, rq);
  55. }
  56. static inline void ssg_del_rq_rb(struct ssg_data *ssg, struct request *rq)
  57. {
  58. const int data_dir = rq_data_dir(rq);
  59. if (ssg->next_rq[data_dir] == rq)
  60. ssg->next_rq[data_dir] = ssg_latter_request(rq);
  61. elv_rb_del(ssg_rb_root(ssg, rq), rq);
  62. }
  63. static inline struct ssg_request_info *ssg_rq_info(struct ssg_data *ssg,
  64. struct request *rq)
  65. {
  66. if (unlikely(!ssg->rq_info))
  67. return NULL;
  68. if (unlikely(!rq))
  69. return NULL;
  70. if (unlikely(rq->internal_tag < 0))
  71. return NULL;
  72. if (unlikely(rq->internal_tag >= rq->q->nr_requests))
  73. return NULL;
  74. return &ssg->rq_info[rq->internal_tag];
  75. }
/*
 * remove rq from rbtree and fifo.
 */
static void ssg_remove_request(struct request_queue *q, struct request *rq)
{
	struct ssg_data *ssg = q->elevator->elevator_data;

	/* Unlink from the fifo list (no-op if already off-list). */
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		ssg_del_rq_rb(ssg, rq);

	/* Drop merge bookkeeping so no further merges target this rq. */
	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}
  92. static void ssg_request_merged(struct request_queue *q, struct request *req,
  93. enum elv_merge type)
  94. {
  95. struct ssg_data *ssg = q->elevator->elevator_data;
  96. /*
  97. * if the merge was a front merge, we need to reposition request
  98. */
  99. if (type == ELEVATOR_FRONT_MERGE) {
  100. elv_rb_del(ssg_rb_root(ssg, req), req);
  101. ssg_add_rq_rb(ssg, req);
  102. }
  103. }
  104. static void ssg_merged_requests(struct request_queue *q, struct request *req,
  105. struct request *next)
  106. {
  107. /*
  108. * if next expires before rq, assign its expire time to rq
  109. * and move into next position (next will be deleted) in fifo
  110. */
  111. if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
  112. if (time_before((unsigned long)next->fifo_time,
  113. (unsigned long)req->fifo_time)) {
  114. list_move(&req->queuelist, &next->queuelist);
  115. req->fifo_time = next->fifo_time;
  116. }
  117. }
  118. /*
  119. * kill knowledge of next, this one is a goner
  120. */
  121. ssg_remove_request(q, next);
  122. }
  123. /*
  124. * move an entry to dispatch queue
  125. */
  126. static void ssg_move_request(struct ssg_data *ssg, struct request *rq)
  127. {
  128. const int data_dir = rq_data_dir(rq);
  129. ssg->next_rq[READ] = NULL;
  130. ssg->next_rq[WRITE] = NULL;
  131. ssg->next_rq[data_dir] = ssg_latter_request(rq);
  132. /*
  133. * take it off the sort and fifo list
  134. */
  135. ssg_remove_request(rq->q, rq);
  136. }
  137. /*
  138. * ssg_check_fifo returns 0 if there are no expired requests on the fifo,
  139. * 1 otherwise. Requires !list_empty(&ssg->fifo_list[data_dir])
  140. */
  141. static inline int ssg_check_fifo(struct ssg_data *ssg, int ddir)
  142. {
  143. struct request *rq = rq_entry_fifo(ssg->fifo_list[ddir].next);
  144. /*
  145. * rq is expired!
  146. */
  147. if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
  148. return 1;
  149. return 0;
  150. }
/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *ssg_fifo_request(struct ssg_data *ssg, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	if (list_empty(&ssg->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(ssg->fifo_list[data_dir].next);

	/* Reads, and writes on non-zoned devices, need no zone check. */
	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone. Hold zone_lock so completions cannot
	 * unlock zones underneath us.
	 */
	spin_lock_irqsave(&ssg->zone_lock, flags);
	list_for_each_entry(rq, &ssg->fifo_list[WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&ssg->zone_lock, flags);

	return rq;
}
  180. /*
  181. * For the specified data direction, return the next request to
  182. * dispatch using sector position sorted lists.
  183. */
  184. static struct request *ssg_next_request(struct ssg_data *ssg, int data_dir)
  185. {
  186. struct request *rq;
  187. unsigned long flags;
  188. if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
  189. return NULL;
  190. rq = ssg->next_rq[data_dir];
  191. if (!rq)
  192. return NULL;
  193. if (data_dir == READ || !blk_queue_is_zoned(rq->q))
  194. return rq;
  195. /*
  196. * Look for a write request that can be dispatched, that is one with
  197. * an unlocked target zone.
  198. */
  199. spin_lock_irqsave(&ssg->zone_lock, flags);
  200. while (rq) {
  201. if (blk_req_can_dispatch_to_zone(rq))
  202. break;
  203. rq = ssg_latter_request(rq);
  204. }
  205. spin_unlock_irqrestore(&ssg->zone_lock, flags);
  206. return rq;
  207. }
  208. /*
  209. * ssg_dispatch_requests selects the best request according to
  210. * read/write expire, etc
  211. */
  212. static struct request *__ssg_dispatch_request(struct ssg_data *ssg)
  213. {
  214. struct request *rq, *next_rq;
  215. bool reads, writes;
  216. int data_dir;
  217. if (!list_empty(&ssg->dispatch)) {
  218. rq = list_first_entry(&ssg->dispatch, struct request, queuelist);
  219. list_del_init(&rq->queuelist);
  220. goto done;
  221. }
  222. reads = !list_empty(&ssg->fifo_list[READ]);
  223. writes = !list_empty(&ssg->fifo_list[WRITE]);
  224. /*
  225. * select the appropriate data direction (read / write)
  226. */
  227. if (reads) {
  228. BUG_ON(RB_EMPTY_ROOT(&ssg->sort_list[READ]));
  229. if (ssg_fifo_request(ssg, WRITE) &&
  230. (ssg->starved_writes++ >= ssg->max_write_starvation))
  231. goto dispatch_writes;
  232. data_dir = READ;
  233. goto dispatch_find_request;
  234. }
  235. /*
  236. * there are either no reads or writes have been starved
  237. */
  238. if (writes) {
  239. dispatch_writes:
  240. BUG_ON(RB_EMPTY_ROOT(&ssg->sort_list[WRITE]));
  241. ssg->starved_writes = 0;
  242. data_dir = WRITE;
  243. goto dispatch_find_request;
  244. }
  245. return NULL;
  246. dispatch_find_request:
  247. /*
  248. * we are not running a batch, find best request for selected data_dir
  249. */
  250. next_rq = ssg_next_request(ssg, data_dir);
  251. if (ssg_check_fifo(ssg, data_dir) || !next_rq) {
  252. /*
  253. * A deadline has expired, the last request was in the other
  254. * direction, or we have run out of higher-sectored requests.
  255. * Start again from the request with the earliest expiry time.
  256. */
  257. rq = ssg_fifo_request(ssg, data_dir);
  258. } else {
  259. /*
  260. * The last req was the same dir and we have a next request in
  261. * sort order. No expired requests so continue on from here.
  262. */
  263. rq = next_rq;
  264. }
  265. /*
  266. * For a zoned block device, if we only have writes queued and none of
  267. * them can be dispatched, rq will be NULL.
  268. */
  269. if (!rq)
  270. return NULL;
  271. /*
  272. * rq is the selected appropriate request.
  273. */
  274. ssg_move_request(ssg, rq);
  275. done:
  276. /*
  277. * If the request needs its target zone locked, do it.
  278. */
  279. blk_req_zone_write_lock(rq);
  280. rq->rq_flags |= RQF_STARTED;
  281. return rq;
  282. }
  283. /*
  284. * One confusing aspect here is that we get called for a specific
  285. * hardware queue, but we may return a request that is for a
  286. * different hardware queue. This is because ssg-iosched has shared
  287. * state for all hardware queues, in terms of sorting, FIFOs, etc.
  288. */
  289. static struct request *ssg_dispatch_request(struct blk_mq_hw_ctx *hctx)
  290. {
  291. struct ssg_data *ssg = hctx->queue->elevator->elevator_data;
  292. struct request *rq;
  293. struct ssg_request_info *rqi;
  294. spin_lock(&ssg->lock);
  295. rq = __ssg_dispatch_request(ssg);
  296. spin_unlock(&ssg->lock);
  297. rqi = ssg_rq_info(ssg, rq);
  298. if (likely(rqi)) {
  299. rqi->sector = blk_rq_pos(rq);
  300. rqi->data_size = blk_rq_bytes(rq);
  301. }
  302. return rq;
  303. }
  304. static void ssg_completed_request(struct request *rq, u64 now)
  305. {
  306. struct ssg_data *ssg = rq->q->elevator->elevator_data;
  307. struct ssg_request_info *rqi;
  308. rqi = ssg_rq_info(ssg, rq);
  309. if (likely(rqi && rqi->sector == blk_rq_pos(rq))) {
  310. ssg_stat_account_io_done(ssg, rq, rqi->data_size, now);
  311. blk_sec_stat_account_io_complete(rq, rqi->data_size, rqi->pio);
  312. }
  313. }
  314. static void ssg_set_shallow_depth(struct ssg_data *ssg, struct blk_mq_tags *tags)
  315. {
  316. unsigned int depth = tags->bitmap_tags.sb.depth;
  317. unsigned int map_nr = tags->bitmap_tags.sb.map_nr;
  318. ssg->max_async_write_rqs =
  319. max_t(int, depth * max_async_write_ratio / 100U, 1);
  320. ssg->max_async_write_rqs =
  321. min_t(int, ssg->max_async_write_rqs, MAX_ASYNC_WRITE_RQS);
  322. ssg->async_write_shallow_depth =
  323. max_t(unsigned int, ssg->max_async_write_rqs / map_nr, 1);
  324. ssg->max_tgroup_rqs =
  325. max_t(int, depth * max_tgroup_io_ratio / 100U, 1);
  326. ssg->tgroup_shallow_depth =
  327. max_t(unsigned int, ssg->max_tgroup_rqs / map_nr, 1);
  328. }
  329. static void ssg_depth_updated(struct blk_mq_hw_ctx *hctx)
  330. {
  331. struct request_queue *q = hctx->queue;
  332. struct ssg_data *ssg = q->elevator->elevator_data;
  333. struct blk_mq_tags *tags = hctx->sched_tags;
  334. unsigned int depth = tags->bitmap_tags.sb.depth;
  335. ssg->congestion_threshold_rqs = depth * congestion_threshold / 100U;
  336. kfree(ssg->rq_info);
  337. ssg->rq_info = kmalloc_array(depth, sizeof(struct ssg_request_info),
  338. GFP_KERNEL | __GFP_ZERO);
  339. if (ZERO_OR_NULL_PTR(ssg->rq_info))
  340. ssg->rq_info = NULL;
  341. ssg_set_shallow_depth(ssg, tags);
  342. sbitmap_queue_min_shallow_depth(&tags->bitmap_tags,
  343. ssg->async_write_shallow_depth);
  344. ssg_blkcg_depth_updated(hctx);
  345. ssg_wb_depth_updated(hctx);
  346. }
  347. static inline bool ssg_op_is_async_write(unsigned int op)
  348. {
  349. return (op & REQ_OP_MASK) == REQ_OP_WRITE && !op_is_sync(op);
  350. }
  351. static unsigned int ssg_async_write_shallow_depth(unsigned int op,
  352. struct blk_mq_alloc_data *data)
  353. {
  354. struct ssg_data *ssg = data->q->elevator->elevator_data;
  355. if (!ssg_op_is_async_write(op))
  356. return 0;
  357. if (atomic_read(&ssg->async_write_rqs) < ssg->max_async_write_rqs)
  358. return 0;
  359. return ssg->async_write_shallow_depth;
  360. }
  361. static unsigned int ssg_tgroup_shallow_depth(struct blk_mq_alloc_data *data)
  362. {
  363. struct ssg_data *ssg = data->q->elevator->elevator_data;
  364. pid_t tgid = task_tgid_nr(current->group_leader);
  365. int nr_requests = data->q->nr_requests;
  366. int tgroup_rqs = 0;
  367. int i;
  368. if (unlikely(!ssg->rq_info))
  369. return 0;
  370. for (i = 0; i < nr_requests; i++)
  371. if (tgid == ssg->rq_info[i].tgid)
  372. tgroup_rqs++;
  373. if (tgroup_rqs < ssg->max_tgroup_rqs)
  374. return 0;
  375. return ssg->tgroup_shallow_depth;
  376. }
  377. static void ssg_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
  378. {
  379. struct ssg_data *ssg = data->q->elevator->elevator_data;
  380. unsigned int shallow_depth = ssg_blkcg_shallow_depth(data->q);
  381. shallow_depth = min_not_zero(shallow_depth,
  382. ssg_async_write_shallow_depth(op, data));
  383. if (atomic_read(&ssg->allocated_rqs) > ssg->congestion_threshold_rqs)
  384. shallow_depth = min_not_zero(shallow_depth,
  385. ssg_tgroup_shallow_depth(data));
  386. data->shallow_depth = shallow_depth;
  387. }
/* Apply the scheduler's shallow-depth limits to a new hardware queue. */
static int ssg_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct ssg_data *ssg = hctx->queue->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;

	ssg_set_shallow_depth(ssg, tags);
	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags,
			ssg->async_write_shallow_depth);

	return 0;
}
/*
 * Tear down elevator private data. Runs with no requests in flight:
 * both fifo lists must already be empty.
 */
static void ssg_exit_queue(struct elevator_queue *e)
{
	struct ssg_data *ssg = e->elevator_data;

	ssg_blkcg_deactivate(ssg->queue);

	BUG_ON(!list_empty(&ssg->fifo_list[READ]));
	BUG_ON(!list_empty(&ssg->fifo_list[WRITE]));

	/* Shut down the stats/writeback helpers before freeing state. */
	ssg_stat_exit(ssg);
	ssg_wb_exit(ssg);
	blk_sec_stat_account_exit(e);
	blk_stat_disable_accounting(ssg->queue);

	kfree(ssg->rq_info);
	kfree(ssg);
}
  410. /*
  411. * initialize elevator private data (ssg_data).
  412. */
  413. static int ssg_init_queue(struct request_queue *q, struct elevator_type *e)
  414. {
  415. struct ssg_data *ssg;
  416. struct elevator_queue *eq;
  417. eq = elevator_alloc(q, e);
  418. if (!eq)
  419. return -ENOMEM;
  420. ssg = kzalloc_node(sizeof(*ssg), GFP_KERNEL, q->node);
  421. if (!ssg) {
  422. kobject_put(&eq->kobj);
  423. return -ENOMEM;
  424. }
  425. eq->elevator_data = ssg;
  426. ssg->queue = q;
  427. INIT_LIST_HEAD(&ssg->fifo_list[READ]);
  428. INIT_LIST_HEAD(&ssg->fifo_list[WRITE]);
  429. ssg->sort_list[READ] = RB_ROOT;
  430. ssg->sort_list[WRITE] = RB_ROOT;
  431. ssg->fifo_expire[READ] = read_expire;
  432. ssg->fifo_expire[WRITE] = write_expire;
  433. ssg->max_write_starvation = max_write_starvation;
  434. ssg->front_merges = 1;
  435. atomic_set(&ssg->allocated_rqs, 0);
  436. atomic_set(&ssg->async_write_rqs, 0);
  437. ssg->congestion_threshold_rqs =
  438. q->nr_requests * congestion_threshold / 100U;
  439. ssg->rq_info = kmalloc_array(q->nr_requests,
  440. sizeof(struct ssg_request_info),
  441. GFP_KERNEL | __GFP_ZERO);
  442. if (ZERO_OR_NULL_PTR(ssg->rq_info))
  443. ssg->rq_info = NULL;
  444. spin_lock_init(&ssg->lock);
  445. spin_lock_init(&ssg->zone_lock);
  446. INIT_LIST_HEAD(&ssg->dispatch);
  447. ssg_blkcg_activate(q);
  448. q->elevator = eq;
  449. ssg_stat_init(ssg);
  450. blk_stat_enable_accounting(q);
  451. blk_sec_stat_account_init(q);
  452. ssg_wb_init(ssg);
  453. return 0;
  454. }
  455. static int ssg_request_merge(struct request_queue *q, struct request **rq,
  456. struct bio *bio)
  457. {
  458. struct ssg_data *ssg = q->elevator->elevator_data;
  459. sector_t sector = bio_end_sector(bio);
  460. struct request *__rq;
  461. if (!ssg->front_merges)
  462. return ELEVATOR_NO_MERGE;
  463. __rq = elv_rb_find(&ssg->sort_list[bio_data_dir(bio)], sector);
  464. if (__rq) {
  465. BUG_ON(sector != blk_rq_pos(__rq));
  466. if (elv_bio_merge_ok(__rq, bio)) {
  467. *rq = __rq;
  468. return ELEVATOR_FRONT_MERGE;
  469. }
  470. }
  471. return ELEVATOR_NO_MERGE;
  472. }
  473. static bool ssg_bio_merge(struct request_queue *q, struct bio *bio,
  474. unsigned int nr_segs)
  475. {
  476. struct ssg_data *ssg = q->elevator->elevator_data;
  477. struct request *free = NULL;
  478. bool ret;
  479. spin_lock(&ssg->lock);
  480. ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
  481. spin_unlock(&ssg->lock);
  482. if (free)
  483. blk_mq_free_request(free);
  484. return ret;
  485. }
  486. /*
  487. * add rq to rbtree and fifo
  488. */
  489. static void ssg_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  490. bool at_head)
  491. {
  492. struct request_queue *q = hctx->queue;
  493. struct ssg_data *ssg = q->elevator->elevator_data;
  494. const int data_dir = rq_data_dir(rq);
  495. LIST_HEAD(free);
  496. /*
  497. * This may be a requeue of a write request that has locked its
  498. * target zone. If it is the case, this releases the zone lock.
  499. */
  500. blk_req_zone_write_unlock(rq);
  501. if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
  502. blk_mq_free_requests(&free);
  503. return;
  504. }
  505. trace_block_rq_insert(rq);
  506. if (at_head || blk_rq_is_passthrough(rq)) {
  507. if (at_head)
  508. list_add(&rq->queuelist, &ssg->dispatch);
  509. else
  510. list_add_tail(&rq->queuelist, &ssg->dispatch);
  511. } else {
  512. ssg_add_rq_rb(ssg, rq);
  513. if (rq_mergeable(rq)) {
  514. elv_rqhash_add(q, rq);
  515. if (!q->last_merge)
  516. q->last_merge = rq;
  517. }
  518. /*
  519. * set expire time and add to fifo list
  520. */
  521. rq->fifo_time = jiffies + ssg->fifo_expire[data_dir];
  522. list_add_tail(&rq->queuelist, &ssg->fifo_list[data_dir]);
  523. }
  524. }
  525. static void ssg_insert_requests(struct blk_mq_hw_ctx *hctx,
  526. struct list_head *list, bool at_head)
  527. {
  528. struct request_queue *q = hctx->queue;
  529. struct ssg_data *ssg = q->elevator->elevator_data;
  530. spin_lock(&ssg->lock);
  531. while (!list_empty(list)) {
  532. struct request *rq;
  533. rq = list_first_entry(list, struct request, queuelist);
  534. list_del_init(&rq->queuelist);
  535. ssg_insert_request(hctx, rq, at_head);
  536. }
  537. spin_unlock(&ssg->lock);
  538. }
  539. /*
  540. * Nothing to do here. This is defined only to ensure that .finish_request
  541. * method is called upon request completion.
  542. */
  543. static void ssg_prepare_request(struct request *rq)
  544. {
  545. struct ssg_data *ssg = rq->q->elevator->elevator_data;
  546. struct ssg_request_info *rqi;
  547. atomic_inc(&ssg->allocated_rqs);
  548. ssg_wb_run_ctrl_work(ssg, rq);
  549. rqi = ssg_rq_info(ssg, rq);
  550. if (likely(rqi)) {
  551. rqi->tgid = task_tgid_nr(current->group_leader);
  552. rcu_read_lock();
  553. rqi->blkg = blkg_lookup(css_to_blkcg(curr_css()), rq->q);
  554. ssg_blkcg_inc_rq(rqi->blkg);
  555. rcu_read_unlock();
  556. blk_sec_stat_account_io_prepare(rq, &rqi->pio);
  557. }
  558. if (ssg_op_is_async_write(rq->cmd_flags))
  559. atomic_inc(&ssg->async_write_rqs);
  560. }
  561. /*
  562. * For zoned block devices, write unlock the target zone of
  563. * completed write requests. Do this while holding the zone lock
  564. * spinlock so that the zone is never unlocked while ssg_fifo_request()
  565. * or ssg_next_request() are executing. This function is called for
  566. * all requests, whether or not these requests complete successfully.
  567. *
  568. * For a zoned block device, __ssg_dispatch_request() may have stopped
  569. * dispatching requests if all the queued requests are write requests directed
  570. * at zones that are already locked due to on-going write requests. To ensure
  571. * write request dispatch progress in this case, mark the queue as needing a
  572. * restart to ensure that the queue is run again after completion of the
  573. * request and zones being unlocked.
  574. */
  575. static void ssg_finish_request(struct request *rq)
  576. {
  577. struct request_queue *q = rq->q;
  578. struct ssg_data *ssg = q->elevator->elevator_data;
  579. struct ssg_request_info *rqi;
  580. if (blk_queue_is_zoned(q)) {
  581. unsigned long flags;
  582. spin_lock_irqsave(&ssg->zone_lock, flags);
  583. blk_req_zone_write_unlock(rq);
  584. if (!list_empty(&ssg->fifo_list[WRITE]))
  585. blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
  586. spin_unlock_irqrestore(&ssg->zone_lock, flags);
  587. }
  588. if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
  589. return;
  590. atomic_dec(&ssg->allocated_rqs);
  591. rqi = ssg_rq_info(ssg, rq);
  592. if (likely(rqi)) {
  593. rqi->tgid = 0;
  594. ssg_blkcg_dec_rq(rqi->blkg);
  595. rqi->blkg = NULL;
  596. blk_sec_stat_account_io_finish(rq, &rqi->pio);
  597. }
  598. if (ssg_op_is_async_write(rq->cmd_flags))
  599. atomic_dec(&ssg->async_write_rqs);
  600. }
  601. static bool ssg_has_work(struct blk_mq_hw_ctx *hctx)
  602. {
  603. struct ssg_data *ssg = hctx->queue->elevator->elevator_data;
  604. return !list_empty_careful(&ssg->dispatch) ||
  605. !list_empty_careful(&ssg->fifo_list[0]) ||
  606. !list_empty_careful(&ssg->fifo_list[1]);
  607. }
  608. /*
  609. * sysfs parts below
  610. */
  611. static ssize_t ssg_var_show(int var, char *page)
  612. {
  613. return sprintf(page, "%d\n", var);
  614. }
  615. static void ssg_var_store(int *var, const char *page)
  616. {
  617. long val;
  618. if (!kstrtol(page, 10, &val))
  619. *var = val;
  620. }
/*
 * Generate a sysfs show handler for one tunable; __CONV converts the
 * stored jiffies value to milliseconds for display.
 */
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct ssg_data *ssg = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return ssg_var_show(__data, (page));				\
}
SHOW_FUNCTION(ssg_read_expire_show, ssg->fifo_expire[READ], 1);
SHOW_FUNCTION(ssg_write_expire_show, ssg->fifo_expire[WRITE], 1);
SHOW_FUNCTION(ssg_max_write_starvation_show, ssg->max_write_starvation, 0);
SHOW_FUNCTION(ssg_front_merges_show, ssg->front_merges, 0);
SHOW_FUNCTION(ssg_max_tgroup_rqs_show, ssg->max_tgroup_rqs, 0);
SHOW_FUNCTION(ssg_max_async_write_rqs_show, ssg->max_async_write_rqs, 0);
SHOW_FUNCTION(ssg_tgroup_shallow_depth_show, ssg->tgroup_shallow_depth, 0);
SHOW_FUNCTION(ssg_async_write_shallow_depth_show, ssg->async_write_shallow_depth, 0);
#undef SHOW_FUNCTION
  639. #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
  640. static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
  641. { \
  642. struct ssg_data *ssg = e->elevator_data; \
  643. int __data; \
  644. ssg_var_store(&__data, (page)); \
  645. if (__data < (MIN)) \
  646. __data = (MIN); \
  647. else if (__data > (MAX)) \
  648. __data = (MAX); \
  649. if (__CONV) \
  650. *(__PTR) = msecs_to_jiffies(__data); \
  651. else \
  652. *(__PTR) = __data; \
  653. return count; \
  654. }
  655. STORE_FUNCTION(ssg_read_expire_store, &ssg->fifo_expire[READ], 0, INT_MAX, 1);
  656. STORE_FUNCTION(ssg_write_expire_store, &ssg->fifo_expire[WRITE], 0, INT_MAX, 1);
  657. STORE_FUNCTION(ssg_max_write_starvation_store, &ssg->max_write_starvation, INT_MIN, INT_MAX, 0);
  658. STORE_FUNCTION(ssg_front_merges_store, &ssg->front_merges, 0, 1, 0);
  659. #undef STORE_FUNCTION
/* Attribute helpers: RW (0644), RO (0444), and RO stat attributes. */
#define SSG_ATTR(name) \
	__ATTR(name, 0644, ssg_##name##_show, ssg_##name##_store)
#define SSG_ATTR_RO(name) \
	__ATTR(name, 0444, ssg_##name##_show, NULL)
#define SSG_STAT_ATTR_RO(name) \
	__ATTR(name, 0444, ssg_stat_##name##_show, NULL)

/* sysfs attributes exposed under /sys/block/<dev>/queue/iosched/. */
static struct elv_fs_entry ssg_attrs[] = {
	SSG_ATTR(read_expire),
	SSG_ATTR(write_expire),
	SSG_ATTR(max_write_starvation),
	SSG_ATTR(front_merges),
	SSG_ATTR_RO(max_tgroup_rqs),
	SSG_ATTR_RO(max_async_write_rqs),
	SSG_ATTR_RO(tgroup_shallow_depth),
	SSG_ATTR_RO(async_write_shallow_depth),

	SSG_STAT_ATTR_RO(read_latency),
	SSG_STAT_ATTR_RO(write_latency),
	SSG_STAT_ATTR_RO(flush_latency),
	SSG_STAT_ATTR_RO(discard_latency),
	SSG_STAT_ATTR_RO(inflight),
	SSG_STAT_ATTR_RO(rqs_info),

#if IS_ENABLED(CONFIG_MQ_IOSCHED_SSG_WB)
	SSG_ATTR(wb_on_rqs),
	SSG_ATTR(wb_off_rqs),
	SSG_ATTR(wb_on_dirty_bytes),
	SSG_ATTR(wb_off_dirty_bytes),
	SSG_ATTR(wb_on_sync_write_bytes),
	SSG_ATTR(wb_off_sync_write_bytes),
	SSG_ATTR(wb_on_dirty_busy_written_bytes),
	SSG_ATTR(wb_on_dirty_busy_msecs),
	SSG_ATTR(wb_off_delay_msecs),
	SSG_ATTR_RO(wb_triggered),
#endif

	__ATTR_NULL
};
/* Elevator registration: wires the callbacks above into blk-mq. */
static struct elevator_type ssg_iosched = {
	.ops = {
		.insert_requests = ssg_insert_requests,
		.dispatch_request = ssg_dispatch_request,
		.completed_request = ssg_completed_request,
		.prepare_request = ssg_prepare_request,
		.finish_request = ssg_finish_request,
		.next_request = elv_rb_latter_request,
		.former_request = elv_rb_former_request,
		.bio_merge = ssg_bio_merge,
		.request_merge = ssg_request_merge,
		.requests_merged = ssg_merged_requests,
		.request_merged = ssg_request_merged,
		.has_work = ssg_has_work,
		.limit_depth = ssg_limit_depth,
		.depth_updated = ssg_depth_updated,
		.init_hctx = ssg_init_hctx,
		.init_sched = ssg_init_queue,
		.exit_sched = ssg_exit_queue,
	},

	.elevator_attrs = ssg_attrs,
	.elevator_name = "ssg",
	.elevator_alias = "ssg",
	/* Supports sequential write constraints of zoned block devices. */
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("ssg");
  722. static int __init ssg_iosched_init(void)
  723. {
  724. int ret;
  725. ret = elv_register(&ssg_iosched);
  726. if (ret)
  727. return ret;
  728. ret = ssg_blkcg_init();
  729. if (ret) {
  730. elv_unregister(&ssg_iosched);
  731. return ret;
  732. }
  733. return ret;
  734. }
/* Reverse of init: tear down blkcg support, then unregister. */
static void __exit ssg_iosched_exit(void)
{
	ssg_blkcg_exit();
	elv_unregister(&ssg_iosched);
}
/* Module entry/exit points and metadata. */
module_init(ssg_iosched_init);
module_exit(ssg_iosched_exit);

MODULE_AUTHOR("Jisoo Oh");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SSG IO Scheduler");