  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Portions Copyright (C) 1992 Drew Eckhardt
  4. */
  5. #ifndef _LINUX_BLKDEV_H
  6. #define _LINUX_BLKDEV_H
  7. #include <linux/types.h>
  8. #include <linux/blk_types.h>
  9. #include <linux/device.h>
  10. #include <linux/list.h>
  11. #include <linux/llist.h>
  12. #include <linux/minmax.h>
  13. #include <linux/timer.h>
  14. #include <linux/workqueue.h>
  15. #include <linux/wait.h>
  16. #include <linux/bio.h>
  17. #include <linux/gfp.h>
  18. #include <linux/kdev_t.h>
  19. #include <linux/rcupdate.h>
  20. #include <linux/percpu-refcount.h>
  21. #include <linux/blkzoned.h>
  22. #include <linux/sched.h>
  23. #include <linux/sbitmap.h>
  24. #include <linux/srcu.h>
  25. #include <linux/uuid.h>
  26. #include <linux/xarray.h>
  27. #include <linux/android_kabi.h>
  28. struct module;
  29. struct request_queue;
  30. struct elevator_queue;
  31. struct blk_trace;
  32. struct request;
  33. struct sg_io_hdr;
  34. struct blkcg_gq;
  35. struct blk_flush_queue;
  36. struct kiocb;
  37. struct pr_ops;
  38. struct rq_qos;
  39. struct blk_queue_stats;
  40. struct blk_stat_callback;
  41. struct blk_crypto_profile;
  42. extern const struct device_type disk_type;
  43. extern struct device_type part_type;
  44. extern struct class block_class;
  45. /* Must be consistent with blk_mq_poll_stats_bkt() */
  46. #define BLK_MQ_POLL_STATS_BKTS 16
  47. /* Doing classic polling */
  48. #define BLK_MQ_POLL_CLASSIC -1
  49. /*
  50. * Maximum number of blkcg policies allowed to be registered concurrently.
  51. * Defined here to simplify include dependency.
  52. */
  53. #define BLKCG_MAX_POLS 6
  54. #define DISK_MAX_PARTS 256
  55. #define DISK_NAME_LEN 32
  56. #define PARTITION_META_INFO_VOLNAMELTH 64
  57. /*
  58. * Enough for the string representation of any kind of UUID plus NULL.
  59. * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
  60. */
  61. #define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
  62. struct partition_meta_info {
  63. char uuid[PARTITION_META_INFO_UUIDLTH];
  64. u8 volname[PARTITION_META_INFO_VOLNAMELTH];
  65. };
  66. /**
  67. * DOC: genhd capability flags
  68. *
  69. * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
  70. * removable media. When set, the device remains present even when media is not
  71. * inserted. Shall not be set for devices which are removed entirely when the
  72. * media is removed.
  73. *
  74. * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
  75. * doesn't appear in sysfs, and can't be opened from userspace or using
  76. * blkdev_get*. Used for the underlying components of multipath devices.
  77. *
  78. * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
  79. * scan for partitions from add_disk, and users can't add partitions manually.
  80. *
  81. */
  82. enum {
  83. GENHD_FL_REMOVABLE = 1 << 0,
  84. GENHD_FL_HIDDEN = 1 << 1,
  85. GENHD_FL_NO_PART = 1 << 2,
  86. };
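/*
 * Illustrative sketch (not part of the API declared in this header): a
 * driver normally ORs the capability flags above into gendisk->flags before
 * registering the disk with add_disk(). The helper name my_disk_setup() is
 * hypothetical.
 *
 *	static int my_disk_setup(struct gendisk *disk)
 *	{
 *		disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
 *		return add_disk(disk);
 *	}
 */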
  87. enum {
  88. DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
  89. DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
  90. };
  91. enum {
  92. /* Poll even if events_poll_msecs is unset */
  93. DISK_EVENT_FLAG_POLL = 1 << 0,
  94. /* Forward events to udev */
  95. DISK_EVENT_FLAG_UEVENT = 1 << 1,
  96. /* Block event polling when open for exclusive write */
  97. DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2,
  98. };
  99. struct disk_events;
  100. struct badblocks;
  101. struct blk_integrity {
  102. const struct blk_integrity_profile *profile;
  103. unsigned char flags;
  104. unsigned char tuple_size;
  105. unsigned char interval_exp;
  106. unsigned char tag_size;
  107. ANDROID_KABI_RESERVE(1);
  108. ANDROID_KABI_RESERVE(2);
  109. };
  110. struct gendisk {
  111. /*
  112. * major/first_minor/minors should not be set by any new driver, the
  113. * block core will take care of allocating them automatically.
  114. */
  115. int major;
  116. int first_minor;
  117. int minors;
  118. char disk_name[DISK_NAME_LEN]; /* name of major driver */
  119. unsigned short events; /* supported events */
  120. unsigned short event_flags; /* flags related to event processing */
  121. struct xarray part_tbl;
  122. struct block_device *part0;
  123. const struct block_device_operations *fops;
  124. struct request_queue *queue;
  125. void *private_data;
  126. struct bio_set bio_split;
  127. int flags;
  128. unsigned long state;
  129. #define GD_NEED_PART_SCAN 0
  130. #define GD_READ_ONLY 1
  131. #define GD_DEAD 2
  132. #define GD_NATIVE_CAPACITY 3
  133. #define GD_ADDED 4
  134. #define GD_SUPPRESS_PART_SCAN 5
  135. #define GD_OWNS_QUEUE 6
  136. struct mutex open_mutex; /* open/close mutex */
  137. unsigned open_partitions; /* number of open partitions */
  138. struct backing_dev_info *bdi;
  139. struct kobject *slave_dir;
  140. #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
  141. struct list_head slave_bdevs;
  142. #endif
  143. struct timer_rand_state *random;
  144. atomic_t sync_io; /* RAID */
  145. struct disk_events *ev;
  146. #ifdef CONFIG_BLK_DEV_INTEGRITY
  147. struct kobject integrity_kobj;
  148. #endif /* CONFIG_BLK_DEV_INTEGRITY */
  149. #ifdef CONFIG_BLK_DEV_ZONED
/*
 * Zoned block device information for request dispatch control.
 * nr_zones is the total number of zones of the device. This is always
 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
 * bits which indicates if a zone is conventional (bit set) or
 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
 * bits which indicates if a zone is write locked, that is, if a write
 * request targeting the zone was dispatched.
 *
 * Reads of this information must be protected with blk_queue_enter() /
 * blk_queue_exit(). Modifying this information is only allowed while
 * no requests are being processed. See also blk_mq_freeze_queue() and
 * blk_mq_unfreeze_queue(). An illustrative read-side sketch follows this
 * structure definition.
 */
  164. unsigned int nr_zones;
  165. unsigned int max_open_zones;
  166. unsigned int max_active_zones;
  167. unsigned long *conv_zones_bitmap;
  168. unsigned long *seq_zones_wlock;
  169. #endif /* CONFIG_BLK_DEV_ZONED */
  170. #if IS_ENABLED(CONFIG_CDROM)
  171. struct cdrom_device_info *cdi;
  172. #endif
  173. int node_id;
  174. struct badblocks *bb;
  175. struct lockdep_map lockdep_map;
  176. u64 diskseq;
  177. /*
  178. * Independent sector access ranges. This is always NULL for
  179. * devices that do not have multiple independent access ranges.
  180. */
  181. struct blk_independent_access_ranges *ia_ranges;
  182. ANDROID_KABI_RESERVE(1);
  183. ANDROID_KABI_RESERVE(2);
  184. ANDROID_KABI_RESERVE(3);
  185. ANDROID_KABI_RESERVE(4);
  186. };
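/*
 * Illustrative read-side sketch for the zoned information documented inside
 * struct gendisk above (an assumption-labelled example, not code from this
 * header): readers hold a queue reference via blk_queue_enter(), while
 * updates are only made with the queue frozen through blk_mq_freeze_queue()
 * / blk_mq_unfreeze_queue() from <linux/blk-mq.h>. my_read_nr_zones() is a
 * hypothetical helper and assumes CONFIG_BLK_DEV_ZONED.
 *
 *	static unsigned int my_read_nr_zones(struct gendisk *disk)
 *	{
 *		unsigned int nr = 0;
 *
 *		if (!blk_queue_enter(disk->queue, 0)) {
 *			nr = disk->nr_zones;
 *			blk_queue_exit(disk->queue);
 *		}
 *		return nr;
 *	}
 */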
  187. static inline bool disk_live(struct gendisk *disk)
  188. {
  189. return !inode_unhashed(disk->part0->bd_inode);
  190. }
  191. /**
  192. * disk_openers - returns how many openers are there for a disk
  193. * @disk: disk to check
  194. *
  195. * This returns the number of openers for a disk. Note that this value is only
  196. * stable if disk->open_mutex is held.
  197. *
  198. * Note: Due to a quirk in the block layer open code, each open partition is
  199. * only counted once even if there are multiple openers.
  200. */
  201. static inline unsigned int disk_openers(struct gendisk *disk)
  202. {
  203. return atomic_read(&disk->part0->bd_openers);
  204. }
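/*
 * Example (illustrative only): since the returned count is stable only
 * under disk->open_mutex, callers sample it while holding that lock.
 *
 *	mutex_lock(&disk->open_mutex);
 *	if (disk_openers(disk) > 1)
 *		pr_info("%s: opened elsewhere\n", disk->disk_name);
 *	mutex_unlock(&disk->open_mutex);
 */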
  205. /*
  206. * The gendisk is refcounted by the part0 block_device, and the bd_device
  207. * therein is also used for device model presentation in sysfs.
  208. */
  209. #define dev_to_disk(device) \
  210. (dev_to_bdev(device)->bd_disk)
  211. #define disk_to_dev(disk) \
  212. (&((disk)->part0->bd_device))
  213. #if IS_REACHABLE(CONFIG_CDROM)
  214. #define disk_to_cdi(disk) ((disk)->cdi)
  215. #else
  216. #define disk_to_cdi(disk) NULL
  217. #endif
  218. static inline dev_t disk_devt(struct gendisk *disk)
  219. {
  220. return MKDEV(disk->major, disk->first_minor);
  221. }
  222. static inline int blk_validate_block_size(unsigned long bsize)
  223. {
  224. if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
  225. return -EINVAL;
  226. return 0;
  227. }
  228. static inline bool blk_op_is_passthrough(blk_opf_t op)
  229. {
  230. op &= REQ_OP_MASK;
  231. return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
  232. }
  233. /*
  234. * Zoned block device models (zoned limit).
  235. *
  236. * Note: This needs to be ordered from the least to the most severe
  237. * restrictions for the inheritance in blk_stack_limits() to work.
  238. */
  239. enum blk_zoned_model {
  240. BLK_ZONED_NONE = 0, /* Regular block device */
  241. BLK_ZONED_HA, /* Host-aware zoned block device */
  242. BLK_ZONED_HM, /* Host-managed zoned block device */
  243. };
  244. /*
  245. * BLK_BOUNCE_NONE: never bounce (default)
  246. * BLK_BOUNCE_HIGH: bounce all highmem pages
  247. */
  248. enum blk_bounce {
  249. BLK_BOUNCE_NONE,
  250. BLK_BOUNCE_HIGH,
  251. };
  252. struct queue_limits {
  253. enum blk_bounce bounce;
  254. unsigned long seg_boundary_mask;
  255. unsigned long virt_boundary_mask;
  256. unsigned int max_hw_sectors;
  257. unsigned int max_dev_sectors;
  258. unsigned int chunk_sectors;
  259. unsigned int max_sectors;
  260. unsigned int max_segment_size;
  261. unsigned int physical_block_size;
  262. unsigned int logical_block_size;
  263. unsigned int alignment_offset;
  264. unsigned int io_min;
  265. unsigned int io_opt;
  266. unsigned int max_discard_sectors;
  267. unsigned int max_hw_discard_sectors;
  268. unsigned int max_secure_erase_sectors;
  269. unsigned int max_write_zeroes_sectors;
  270. unsigned int max_zone_append_sectors;
  271. unsigned int discard_granularity;
  272. unsigned int discard_alignment;
  273. unsigned int zone_write_granularity;
  274. unsigned short max_segments;
  275. unsigned short max_integrity_segments;
  276. unsigned short max_discard_segments;
  277. unsigned char misaligned;
  278. unsigned char discard_misaligned;
  279. unsigned char raid_partial_stripes_expensive;
  280. enum blk_zoned_model zoned;
  281. /*
  282. * Drivers that set dma_alignment to less than 511 must be prepared to
  283. * handle individual bvec's that are not a multiple of a SECTOR_SIZE
  284. * due to possible offsets.
  285. */
  286. unsigned int dma_alignment;
  287. ANDROID_OEM_DATA(1);
  288. ANDROID_KABI_RESERVE(1);
  289. };
  290. typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
  291. void *data);
  292. void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
  293. #ifdef CONFIG_BLK_DEV_ZONED
  294. #define BLK_ALL_ZONES ((unsigned int)-1)
  295. int blkdev_report_zones(struct block_device *bdev, sector_t sector,
  296. unsigned int nr_zones, report_zones_cb cb, void *data);
  297. unsigned int bdev_nr_zones(struct block_device *bdev);
  298. extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
  299. sector_t sectors, sector_t nr_sectors,
  300. gfp_t gfp_mask);
  301. int blk_revalidate_disk_zones(struct gendisk *disk,
  302. void (*update_driver_data)(struct gendisk *disk));
  303. extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
  304. unsigned int cmd, unsigned long arg);
  305. extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
  306. unsigned int cmd, unsigned long arg);
  307. #else /* CONFIG_BLK_DEV_ZONED */
  308. static inline unsigned int bdev_nr_zones(struct block_device *bdev)
  309. {
  310. return 0;
  311. }
  312. static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
  313. fmode_t mode, unsigned int cmd,
  314. unsigned long arg)
  315. {
  316. return -ENOTTY;
  317. }
  318. static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
  319. fmode_t mode, unsigned int cmd,
  320. unsigned long arg)
  321. {
  322. return -ENOTTY;
  323. }
  324. #endif /* CONFIG_BLK_DEV_ZONED */
  325. /*
  326. * Independent access ranges: struct blk_independent_access_range describes
  327. * a range of contiguous sectors that can be accessed using device command
  328. * execution resources that are independent from the resources used for
  329. * other access ranges. This is typically found with single-LUN multi-actuator
  330. * HDDs where each access range is served by a different set of heads.
  331. * The set of independent ranges supported by the device is defined using
  332. * struct blk_independent_access_ranges. The independent ranges must not overlap
  333. * and must include all sectors within the disk capacity (no sector holes
  334. * allowed).
  335. * For a device with multiple ranges, requests targeting sectors in different
  336. * ranges can be executed in parallel. A request can straddle an access range
  337. * boundary.
  338. */
  339. struct blk_independent_access_range {
  340. struct kobject kobj;
  341. sector_t sector;
  342. sector_t nr_sectors;
  343. };
  344. struct blk_independent_access_ranges {
  345. struct kobject kobj;
  346. bool sysfs_registered;
  347. unsigned int nr_ia_ranges;
  348. struct blk_independent_access_range ia_range[];
  349. };
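/*
 * Illustrative sketch (hypothetical driver code): a dual-actuator disk
 * driver could describe two halves of the capacity with the helpers
 * declared further down in this file; error handling is omitted.
 *
 *	struct blk_independent_access_ranges *iars;
 *	sector_t half = get_capacity(disk) / 2;
 *
 *	iars = disk_alloc_independent_access_ranges(disk, 2);
 *	if (iars) {
 *		iars->ia_range[0].sector = 0;
 *		iars->ia_range[0].nr_sectors = half;
 *		iars->ia_range[1].sector = half;
 *		iars->ia_range[1].nr_sectors = get_capacity(disk) - half;
 *		disk_set_independent_access_ranges(disk, iars);
 *	}
 */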
  350. struct request_queue {
  351. struct request *last_merge;
  352. struct elevator_queue *elevator;
  353. struct percpu_ref q_usage_counter;
  354. struct blk_queue_stats *stats;
  355. struct rq_qos *rq_qos;
  356. const struct blk_mq_ops *mq_ops;
  357. /* sw queues */
  358. struct blk_mq_ctx __percpu *queue_ctx;
  359. unsigned int queue_depth;
  360. /* hw dispatch queues */
  361. struct xarray hctx_table;
  362. unsigned int nr_hw_queues;
  363. /*
  364. * The queue owner gets to use this for whatever they like.
  365. * ll_rw_blk doesn't touch it.
  366. */
  367. void *queuedata;
  368. /*
  369. * various queue flags, see QUEUE_* below
  370. */
  371. unsigned long queue_flags;
  372. /*
  373. * Number of contexts that have called blk_set_pm_only(). If this
  374. * counter is above zero then only RQF_PM requests are processed.
  375. */
  376. atomic_t pm_only;
  377. /*
  378. * ida allocated id for this queue. Used to index queues from
  379. * ioctx.
  380. */
  381. int id;
  382. spinlock_t queue_lock;
  383. struct gendisk *disk;
  384. /*
  385. * queue kobject
  386. */
  387. struct kobject kobj;
  388. /*
  389. * mq queue kobject
  390. */
  391. struct kobject *mq_kobj;
  392. #ifdef CONFIG_BLK_DEV_INTEGRITY
  393. struct blk_integrity integrity;
  394. #endif /* CONFIG_BLK_DEV_INTEGRITY */
  395. #ifdef CONFIG_PM
  396. struct device *dev;
  397. enum rpm_status rpm_status;
  398. #endif
  399. /*
  400. * queue settings
  401. */
  402. unsigned long nr_requests; /* Max # of requests */
  403. unsigned int dma_pad_mask;
  404. #ifdef CONFIG_BLK_INLINE_ENCRYPTION
  405. struct blk_crypto_profile *crypto_profile;
  406. struct kobject *crypto_kobject;
  407. #endif
  408. unsigned int rq_timeout;
  409. int poll_nsec;
  410. struct blk_stat_callback *poll_cb;
  411. struct blk_rq_stat *poll_stat;
  412. struct timer_list timeout;
  413. struct work_struct timeout_work;
  414. atomic_t nr_active_requests_shared_tags;
  415. struct blk_mq_tags *sched_shared_tags;
  416. struct list_head icq_list;
  417. #ifdef CONFIG_BLK_CGROUP
  418. DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
  419. struct blkcg_gq *root_blkg;
  420. struct list_head blkg_list;
  421. #endif
  422. struct queue_limits limits;
  423. unsigned int required_elevator_features;
  424. int node;
  425. #ifdef CONFIG_BLK_DEV_IO_TRACE
  426. struct blk_trace __rcu *blk_trace;
  427. #endif
  428. /*
  429. * for flush operations
  430. */
  431. struct blk_flush_queue *fq;
  432. struct list_head requeue_list;
  433. spinlock_t requeue_lock;
  434. struct delayed_work requeue_work;
  435. struct mutex sysfs_lock;
  436. struct mutex sysfs_dir_lock;
  437. /*
  438. * for reusing dead hctx instance in case of updating
  439. * nr_hw_queues
  440. */
  441. struct list_head unused_hctx_list;
  442. spinlock_t unused_hctx_lock;
  443. int mq_freeze_depth;
  444. #ifdef CONFIG_BLK_DEV_THROTTLING
  445. /* Throttle data */
  446. struct throtl_data *td;
  447. #endif
  448. struct rcu_head rcu_head;
  449. wait_queue_head_t mq_freeze_wq;
  450. /*
  451. * Protect concurrent access to q_usage_counter by
  452. * percpu_ref_kill() and percpu_ref_reinit().
  453. */
  454. struct mutex mq_freeze_lock;
  455. int quiesce_depth;
  456. struct blk_mq_tag_set *tag_set;
  457. struct list_head tag_set_list;
  458. struct dentry *debugfs_dir;
  459. struct dentry *sched_debugfs_dir;
  460. struct dentry *rqos_debugfs_dir;
  461. /*
  462. * Serializes all debugfs metadata operations using the above dentries.
  463. */
  464. struct mutex debugfs_mutex;
  465. bool mq_sysfs_init_done;
  466. ANDROID_OEM_DATA(1);
  467. ANDROID_KABI_RESERVE(1);
  468. ANDROID_KABI_RESERVE(2);
  469. ANDROID_KABI_RESERVE(3);
  470. ANDROID_KABI_RESERVE(4);
  471. /**
  472. * @srcu: Sleepable RCU. Use as lock when type of the request queue
  473. * is blocking (BLK_MQ_F_BLOCKING). Must be the last member
  474. */
  475. struct srcu_struct srcu[];
  476. };
  477. /* Keep blk_queue_flag_name[] in sync with the definitions below */
  478. #define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
  479. #define QUEUE_FLAG_DYING 1 /* queue being torn down */
  480. #define QUEUE_FLAG_HAS_SRCU 2 /* SRCU is allocated */
  481. #define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
  482. #define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
  483. #define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
  484. #define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
  485. #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
  486. #define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
  487. #define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
  488. #define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
  489. #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
#define QUEUE_FLAG_HW_WC 13 /* Write back caching supported */
  491. #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
  492. #define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
  493. #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
  494. #define QUEUE_FLAG_WC 17 /* Write back caching */
  495. #define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
  496. #define QUEUE_FLAG_DAX 19 /* device supports DAX */
  497. #define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
  498. #define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
  499. #define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
  500. #define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
  501. #define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
  502. #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
  503. #define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
  504. #define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
  505. #define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
  506. #define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_IO_STAT) | \
  507. (1UL << QUEUE_FLAG_SAME_COMP) | \
  508. (1UL << QUEUE_FLAG_NOWAIT))
  509. void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
  510. void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
  511. bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
  512. #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
  513. #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
  514. #define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
  515. #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
  516. #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
  517. #define blk_queue_noxmerges(q) \
  518. test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
  519. #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
  520. #define blk_queue_stable_writes(q) \
  521. test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
  522. #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
  523. #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
  524. #define blk_queue_zone_resetall(q) \
  525. test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
  526. #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
  527. #define blk_queue_pci_p2pdma(q) \
  528. test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
  529. #ifdef CONFIG_BLK_RQ_ALLOC_TIME
  530. #define blk_queue_rq_alloc_time(q) \
  531. test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
  532. #else
  533. #define blk_queue_rq_alloc_time(q) false
  534. #endif
  535. #define blk_noretry_request(rq) \
  536. ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
  537. REQ_FAILFAST_DRIVER))
  538. #define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
  539. #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
  540. #define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
  541. #define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
  542. extern void blk_set_pm_only(struct request_queue *q);
  543. extern void blk_clear_pm_only(struct request_queue *q);
  544. #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
  545. #define dma_map_bvec(dev, bv, dir, attrs) \
  546. dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
  547. (dir), (attrs))
  548. static inline bool queue_is_mq(struct request_queue *q)
  549. {
  550. return q->mq_ops;
  551. }
  552. #ifdef CONFIG_PM
  553. static inline enum rpm_status queue_rpm_status(struct request_queue *q)
  554. {
  555. return q->rpm_status;
  556. }
  557. #else
  558. static inline enum rpm_status queue_rpm_status(struct request_queue *q)
  559. {
  560. return RPM_ACTIVE;
  561. }
  562. #endif
  563. static inline enum blk_zoned_model
  564. blk_queue_zoned_model(struct request_queue *q)
  565. {
  566. if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
  567. return q->limits.zoned;
  568. return BLK_ZONED_NONE;
  569. }
  570. static inline bool blk_queue_is_zoned(struct request_queue *q)
  571. {
  572. switch (blk_queue_zoned_model(q)) {
  573. case BLK_ZONED_HA:
  574. case BLK_ZONED_HM:
  575. return true;
  576. default:
  577. return false;
  578. }
  579. }
  580. #ifdef CONFIG_BLK_DEV_ZONED
  581. static inline unsigned int disk_nr_zones(struct gendisk *disk)
  582. {
  583. return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
  584. }
  585. static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
  586. {
  587. if (!blk_queue_is_zoned(disk->queue))
  588. return 0;
  589. return sector >> ilog2(disk->queue->limits.chunk_sectors);
  590. }
  591. static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
  592. {
  593. if (!blk_queue_is_zoned(disk->queue))
  594. return false;
  595. if (!disk->conv_zones_bitmap)
  596. return true;
  597. return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
  598. }
  599. static inline void disk_set_max_open_zones(struct gendisk *disk,
  600. unsigned int max_open_zones)
  601. {
  602. disk->max_open_zones = max_open_zones;
  603. }
  604. static inline void disk_set_max_active_zones(struct gendisk *disk,
  605. unsigned int max_active_zones)
  606. {
  607. disk->max_active_zones = max_active_zones;
  608. }
  609. static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
  610. {
  611. return bdev->bd_disk->max_open_zones;
  612. }
  613. static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
  614. {
  615. return bdev->bd_disk->max_active_zones;
  616. }
  617. #else /* CONFIG_BLK_DEV_ZONED */
  618. static inline unsigned int disk_nr_zones(struct gendisk *disk)
  619. {
  620. return 0;
  621. }
  622. static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
  623. {
  624. return false;
  625. }
  626. static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
  627. {
  628. return 0;
  629. }
  630. static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
  631. {
  632. return 0;
  633. }
  634. static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
  635. {
  636. return 0;
  637. }
  638. #endif /* CONFIG_BLK_DEV_ZONED */
  639. static inline unsigned int blk_queue_depth(struct request_queue *q)
  640. {
  641. if (q->queue_depth)
  642. return q->queue_depth;
  643. return q->nr_requests;
  644. }
  645. /*
  646. * default timeout for SG_IO if none specified
  647. */
  648. #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
  649. #define BLK_MIN_SG_TIMEOUT (7 * HZ)
  650. /* This should not be used directly - use rq_for_each_segment */
  651. #define for_each_bio(_bio) \
  652. for (; _bio; _bio = _bio->bi_next)
  653. int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
  654. const struct attribute_group **groups);
  655. static inline int __must_check add_disk(struct gendisk *disk)
  656. {
  657. return device_add_disk(NULL, disk, NULL);
  658. }
  659. void del_gendisk(struct gendisk *gp);
  660. void invalidate_disk(struct gendisk *disk);
  661. void set_disk_ro(struct gendisk *disk, bool read_only);
  662. void disk_uevent(struct gendisk *disk, enum kobject_action action);
  663. static inline int get_disk_ro(struct gendisk *disk)
  664. {
  665. return disk->part0->bd_read_only ||
  666. test_bit(GD_READ_ONLY, &disk->state);
  667. }
  668. static inline int bdev_read_only(struct block_device *bdev)
  669. {
  670. return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
  671. }
  672. bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
  673. bool disk_force_media_change(struct gendisk *disk, unsigned int events);
  674. void add_disk_randomness(struct gendisk *disk) __latent_entropy;
  675. void rand_initialize_disk(struct gendisk *disk);
  676. static inline sector_t get_start_sect(struct block_device *bdev)
  677. {
  678. return bdev->bd_start_sect;
  679. }
  680. static inline sector_t bdev_nr_sectors(struct block_device *bdev)
  681. {
  682. return bdev->bd_nr_sectors;
  683. }
  684. static inline loff_t bdev_nr_bytes(struct block_device *bdev)
  685. {
  686. return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
  687. }
  688. static inline sector_t get_capacity(struct gendisk *disk)
  689. {
  690. return bdev_nr_sectors(disk->part0);
  691. }
  692. static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
  693. {
  694. return bdev_nr_sectors(sb->s_bdev) >>
  695. (sb->s_blocksize_bits - SECTOR_SHIFT);
  696. }
  697. int bdev_disk_changed(struct gendisk *disk, bool invalidate);
  698. void put_disk(struct gendisk *disk);
  699. struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
  700. /**
  701. * blk_alloc_disk - allocate a gendisk structure
  702. * @node_id: numa node to allocate on
  703. *
  704. * Allocate and pre-initialize a gendisk structure for use with BIO based
  705. * drivers.
  706. *
  707. * Context: can sleep
  708. */
  709. #define blk_alloc_disk(node_id) \
  710. ({ \
  711. static struct lock_class_key __key; \
  712. \
  713. __blk_alloc_disk(node_id, &__key); \
  714. })
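/*
 * Usage sketch for a bio-based driver (my_fops, my_dev and my_dev_sectors
 * are hypothetical; error handling is abbreviated): allocate the disk, fill
 * in the fields the block core does not own, set a capacity and register.
 *
 *	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);
 *	int ret;
 *
 *	if (!disk)
 *		return -ENOMEM;
 *	snprintf(disk->disk_name, DISK_NAME_LEN, "mydev0");
 *	disk->fops = &my_fops;
 *	disk->private_data = my_dev;
 *	set_capacity(disk, my_dev_sectors);
 *	ret = add_disk(disk);
 *	if (ret) {
 *		put_disk(disk);
 *		return ret;
 *	}
 */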
  715. int __register_blkdev(unsigned int major, const char *name,
  716. void (*probe)(dev_t devt));
  717. #define register_blkdev(major, name) \
  718. __register_blkdev(major, name, NULL)
  719. void unregister_blkdev(unsigned int major, const char *name);
  720. bool bdev_check_media_change(struct block_device *bdev);
  721. int __invalidate_device(struct block_device *bdev, bool kill_dirty);
  722. void set_capacity(struct gendisk *disk, sector_t size);
  723. #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
  724. int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
  725. void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
  726. int bd_register_pending_holders(struct gendisk *disk);
  727. #else
  728. static inline int bd_link_disk_holder(struct block_device *bdev,
  729. struct gendisk *disk)
  730. {
  731. return 0;
  732. }
  733. static inline void bd_unlink_disk_holder(struct block_device *bdev,
  734. struct gendisk *disk)
  735. {
  736. }
  737. static inline int bd_register_pending_holders(struct gendisk *disk)
  738. {
  739. return 0;
  740. }
  741. #endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
  742. dev_t part_devt(struct gendisk *disk, u8 partno);
  743. void inc_diskseq(struct gendisk *disk);
  744. dev_t blk_lookup_devt(const char *name, int partno);
  745. void blk_request_module(dev_t devt);
  746. extern int blk_register_queue(struct gendisk *disk);
  747. extern void blk_unregister_queue(struct gendisk *disk);
  748. void submit_bio_noacct(struct bio *bio);
  749. struct bio *bio_split_to_limits(struct bio *bio);
  750. extern int blk_lld_busy(struct request_queue *q);
  751. extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
  752. extern void blk_queue_exit(struct request_queue *q);
  753. extern void blk_sync_queue(struct request_queue *q);
  754. /* Helper to convert REQ_OP_XXX to its string format XXX */
  755. extern const char *blk_op_str(enum req_op op);
  756. int blk_status_to_errno(blk_status_t status);
  757. blk_status_t errno_to_blk_status(int errno);
  758. /* only poll the hardware once, don't continue until a completion was found */
  759. #define BLK_POLL_ONESHOT (1 << 0)
  760. /* do not sleep to wait for the expected completion time */
  761. #define BLK_POLL_NOSLEEP (1 << 1)
  762. int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
  763. int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
  764. unsigned int flags);
  765. static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
  766. {
  767. return bdev->bd_queue; /* this is never NULL */
  768. }
  769. /* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
  770. const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
  771. static inline unsigned int bio_zone_no(struct bio *bio)
  772. {
  773. return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
  774. }
  775. static inline unsigned int bio_zone_is_seq(struct bio *bio)
  776. {
  777. return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
  778. }
  779. /*
  780. * Return how much of the chunk is left to be used for I/O at a given offset.
  781. */
  782. static inline unsigned int blk_chunk_sectors_left(sector_t offset,
  783. unsigned int chunk_sectors)
  784. {
  785. if (unlikely(!is_power_of_2(chunk_sectors)))
  786. return chunk_sectors - sector_div(offset, chunk_sectors);
  787. return chunk_sectors - (offset & (chunk_sectors - 1));
  788. }
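/*
 * Worked example (illustrative): with chunk_sectors = 128 and offset = 200,
 * the offset sits 72 sectors into its chunk, so
 * blk_chunk_sectors_left(200, 128) returns 128 - 72 = 56 sectors before the
 * next chunk boundary.
 */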
/*
 * Access functions for manipulating queue properties. An illustrative
 * driver-side sketch follows the declarations below.
 */
  792. void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
  793. extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
  794. extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
  795. extern void blk_queue_max_segments(struct request_queue *, unsigned short);
  796. extern void blk_queue_max_discard_segments(struct request_queue *,
  797. unsigned short);
  798. void blk_queue_max_secure_erase_sectors(struct request_queue *q,
  799. unsigned int max_sectors);
  800. extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
  801. extern void blk_queue_max_discard_sectors(struct request_queue *q,
  802. unsigned int max_discard_sectors);
  803. extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
  804. unsigned int max_write_same_sectors);
  805. extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
  806. extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
  807. unsigned int max_zone_append_sectors);
  808. extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
  809. void blk_queue_zone_write_granularity(struct request_queue *q,
  810. unsigned int size);
  811. extern void blk_queue_alignment_offset(struct request_queue *q,
  812. unsigned int alignment);
  813. void disk_update_readahead(struct gendisk *disk);
  814. extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
  815. extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
  816. extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
  817. extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
  818. extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
  819. extern void blk_set_stacking_limits(struct queue_limits *lim);
  820. extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
  821. sector_t offset);
  822. extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
  823. sector_t offset);
  824. extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
  825. extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
  826. extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
  827. extern void blk_queue_dma_alignment(struct request_queue *, int);
  828. extern void blk_queue_update_dma_alignment(struct request_queue *, int);
  829. extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
  830. extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
  831. struct blk_independent_access_ranges *
  832. disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
  833. void disk_set_independent_access_ranges(struct gendisk *disk,
  834. struct blk_independent_access_ranges *iars);
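/*
 * Illustrative sketch (hypothetical limits, not taken from this header): a
 * driver typically applies its hardware limits during probe using the
 * setters declared above, e.g. for a 4096-byte-sector device with a
 * volatile write cache and FUA support:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 2048);
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_io_min(q, 4096);
 *	blk_queue_io_opt(q, 1024 * 1024);
 *	blk_queue_write_cache(q, true, true);
 */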
  835. /*
  836. * Elevator features for blk_queue_required_elevator_features:
  837. */
  838. /* Supports zoned block devices sequential write constraint */
  839. #define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
  840. extern void blk_queue_required_elevator_features(struct request_queue *q,
  841. unsigned int features);
  842. extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
  843. struct device *dev);
  844. bool __must_check blk_get_queue(struct request_queue *);
  845. extern void blk_put_queue(struct request_queue *);
  846. void blk_mark_disk_dead(struct gendisk *disk);
  847. #ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as contention on the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called. An illustrative usage sketch follows
 * blk_flush_plug() below.
 */
  859. struct blk_plug {
  860. struct request *mq_list; /* blk-mq requests */
  861. /* if ios_left is > 1, we can batch tag/rq allocations */
  862. struct request *cached_rq;
  863. unsigned short nr_ios;
  864. unsigned short rq_count;
  865. bool multiple_queues;
  866. bool has_elevator;
  867. bool nowait;
  868. struct list_head cb_list; /* md requires an unplug callback */
  869. };
  870. struct blk_plug_cb;
  871. typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
  872. struct blk_plug_cb {
  873. struct list_head list;
  874. blk_plug_cb_fn callback;
  875. void *data;
  876. };
  877. extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
  878. void *data, int size);
  879. extern void blk_start_plug(struct blk_plug *);
  880. extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
  881. extern void blk_finish_plug(struct blk_plug *);
  882. void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
  883. static inline void blk_flush_plug(struct blk_plug *plug, bool async)
  884. {
  885. if (plug)
  886. __blk_flush_plug(plug, async);
  887. }
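/*
 * Plugging sketch (illustrative; my_bios and my_nr are hypothetical): a
 * submitter batches related bios between blk_start_plug() and
 * blk_finish_plug() so the block layer can merge and dispatch them as a
 * group.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < my_nr; i++)
 *		submit_bio(my_bios[i]);
 *	blk_finish_plug(&plug);
 */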
  888. int blkdev_issue_flush(struct block_device *bdev);
  889. long nr_blockdev_pages(void);
  890. #else /* CONFIG_BLOCK */
  891. struct blk_plug {
  892. };
  893. static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
  894. unsigned short nr_ios)
  895. {
  896. }
  897. static inline void blk_start_plug(struct blk_plug *plug)
  898. {
  899. }
  900. static inline void blk_finish_plug(struct blk_plug *plug)
  901. {
  902. }
  903. static inline void blk_flush_plug(struct blk_plug *plug, bool async)
  904. {
  905. }
  906. static inline int blkdev_issue_flush(struct block_device *bdev)
  907. {
  908. return 0;
  909. }
  910. static inline long nr_blockdev_pages(void)
  911. {
  912. return 0;
  913. }
  914. #endif /* CONFIG_BLOCK */
  915. extern void blk_io_schedule(void);
  916. int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
  917. sector_t nr_sects, gfp_t gfp_mask);
  918. int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
  919. sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
  920. int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
  921. sector_t nr_sects, gfp_t gfp);
  922. #define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
  923. #define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
  924. extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
  925. sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
  926. unsigned flags);
  927. extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
  928. sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
  929. static inline int sb_issue_discard(struct super_block *sb, sector_t block,
  930. sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
  931. {
  932. return blkdev_issue_discard(sb->s_bdev,
  933. block << (sb->s_blocksize_bits -
  934. SECTOR_SHIFT),
  935. nr_blocks << (sb->s_blocksize_bits -
  936. SECTOR_SHIFT),
  937. gfp_mask);
  938. }
  939. static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
  940. sector_t nr_blocks, gfp_t gfp_mask)
  941. {
  942. return blkdev_issue_zeroout(sb->s_bdev,
  943. block << (sb->s_blocksize_bits -
  944. SECTOR_SHIFT),
  945. nr_blocks << (sb->s_blocksize_bits -
  946. SECTOR_SHIFT),
  947. gfp_mask, 0);
  948. }
  949. static inline bool bdev_is_partition(struct block_device *bdev)
  950. {
  951. return bdev->bd_partno;
  952. }
  953. enum blk_default_limits {
  954. BLK_MAX_SEGMENTS = 128,
  955. BLK_SAFE_MAX_SECTORS = 255,
  956. BLK_DEF_MAX_SECTORS = 2560,
  957. BLK_MAX_SEGMENT_SIZE = 65536,
  958. BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
  959. };
  960. static inline unsigned long queue_segment_boundary(const struct request_queue *q)
  961. {
  962. return q->limits.seg_boundary_mask;
  963. }
  964. static inline unsigned long queue_virt_boundary(const struct request_queue *q)
  965. {
  966. return q->limits.virt_boundary_mask;
  967. }
  968. static inline unsigned int queue_max_sectors(const struct request_queue *q)
  969. {
  970. return q->limits.max_sectors;
  971. }
  972. static inline unsigned int queue_max_bytes(struct request_queue *q)
  973. {
  974. return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
  975. }
  976. static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
  977. {
  978. return q->limits.max_hw_sectors;
  979. }
  980. static inline unsigned short queue_max_segments(const struct request_queue *q)
  981. {
  982. return q->limits.max_segments;
  983. }
  984. static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
  985. {
  986. return q->limits.max_discard_segments;
  987. }
  988. static inline unsigned int queue_max_segment_size(const struct request_queue *q)
  989. {
  990. return q->limits.max_segment_size;
  991. }
  992. static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
  993. {
  994. const struct queue_limits *l = &q->limits;
  995. return min(l->max_zone_append_sectors, l->max_sectors);
  996. }
  997. static inline unsigned int
  998. bdev_max_zone_append_sectors(struct block_device *bdev)
  999. {
  1000. return queue_max_zone_append_sectors(bdev_get_queue(bdev));
  1001. }
  1002. static inline unsigned int bdev_max_segments(struct block_device *bdev)
  1003. {
  1004. return queue_max_segments(bdev_get_queue(bdev));
  1005. }
  1006. static inline unsigned queue_logical_block_size(const struct request_queue *q)
  1007. {
  1008. int retval = 512;
  1009. if (q && q->limits.logical_block_size)
  1010. retval = q->limits.logical_block_size;
  1011. return retval;
  1012. }
  1013. static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
  1014. {
  1015. return queue_logical_block_size(bdev_get_queue(bdev));
  1016. }
  1017. static inline unsigned int queue_physical_block_size(const struct request_queue *q)
  1018. {
  1019. return q->limits.physical_block_size;
  1020. }
  1021. static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
  1022. {
  1023. return queue_physical_block_size(bdev_get_queue(bdev));
  1024. }
  1025. static inline unsigned int queue_io_min(const struct request_queue *q)
  1026. {
  1027. return q->limits.io_min;
  1028. }
  1029. static inline int bdev_io_min(struct block_device *bdev)
  1030. {
  1031. return queue_io_min(bdev_get_queue(bdev));
  1032. }
  1033. static inline unsigned int queue_io_opt(const struct request_queue *q)
  1034. {
  1035. return q->limits.io_opt;
  1036. }
  1037. static inline int bdev_io_opt(struct block_device *bdev)
  1038. {
  1039. return queue_io_opt(bdev_get_queue(bdev));
  1040. }
  1041. static inline unsigned int
  1042. queue_zone_write_granularity(const struct request_queue *q)
  1043. {
  1044. return q->limits.zone_write_granularity;
  1045. }
  1046. static inline unsigned int
  1047. bdev_zone_write_granularity(struct block_device *bdev)
  1048. {
  1049. return queue_zone_write_granularity(bdev_get_queue(bdev));
  1050. }
  1051. int bdev_alignment_offset(struct block_device *bdev);
  1052. unsigned int bdev_discard_alignment(struct block_device *bdev);
  1053. static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
  1054. {
  1055. return bdev_get_queue(bdev)->limits.max_discard_sectors;
  1056. }
  1057. static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
  1058. {
  1059. return bdev_get_queue(bdev)->limits.discard_granularity;
  1060. }
  1061. static inline unsigned int
  1062. bdev_max_secure_erase_sectors(struct block_device *bdev)
  1063. {
  1064. return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
  1065. }
  1066. static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
  1067. {
  1068. struct request_queue *q = bdev_get_queue(bdev);
  1069. if (q)
  1070. return q->limits.max_write_zeroes_sectors;
  1071. return 0;
  1072. }
  1073. static inline bool bdev_nonrot(struct block_device *bdev)
  1074. {
  1075. return blk_queue_nonrot(bdev_get_queue(bdev));
  1076. }
  1077. static inline bool bdev_stable_writes(struct block_device *bdev)
  1078. {
  1079. return test_bit(QUEUE_FLAG_STABLE_WRITES,
  1080. &bdev_get_queue(bdev)->queue_flags);
  1081. }
  1082. static inline bool bdev_write_cache(struct block_device *bdev)
  1083. {
  1084. return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
  1085. }
  1086. static inline bool bdev_fua(struct block_device *bdev)
  1087. {
  1088. return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
  1089. }
  1090. static inline bool bdev_nowait(struct block_device *bdev)
  1091. {
  1092. return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
  1093. }
  1094. static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
  1095. {
  1096. struct request_queue *q = bdev_get_queue(bdev);
  1097. if (q)
  1098. return blk_queue_zoned_model(q);
  1099. return BLK_ZONED_NONE;
  1100. }
  1101. static inline bool bdev_is_zoned(struct block_device *bdev)
  1102. {
  1103. struct request_queue *q = bdev_get_queue(bdev);
  1104. if (q)
  1105. return blk_queue_is_zoned(q);
  1106. return false;
  1107. }
  1108. static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
  1109. enum req_op op)
  1110. {
  1111. if (!bdev_is_zoned(bdev))
  1112. return false;
  1113. return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
  1114. }
  1115. static inline sector_t bdev_zone_sectors(struct block_device *bdev)
  1116. {
  1117. struct request_queue *q = bdev_get_queue(bdev);
  1118. if (!blk_queue_is_zoned(q))
  1119. return 0;
  1120. return q->limits.chunk_sectors;
  1121. }
  1122. static inline int queue_dma_alignment(const struct request_queue *q)
  1123. {
  1124. return q ? q->limits.dma_alignment : 511;
  1125. }
  1126. static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
  1127. {
  1128. return queue_dma_alignment(bdev_get_queue(bdev));
  1129. }
  1130. static inline bool bdev_iter_is_aligned(struct block_device *bdev,
  1131. struct iov_iter *iter)
  1132. {
  1133. return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
  1134. bdev_logical_block_size(bdev) - 1);
  1135. }
  1136. static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
  1137. unsigned int len)
  1138. {
  1139. unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
  1140. return !(addr & alignment) && !(len & alignment);
  1141. }
  1142. /* assumes size > 256 */
  1143. static inline unsigned int blksize_bits(unsigned int size)
  1144. {
  1145. unsigned int bits = 8;
  1146. do {
  1147. bits++;
  1148. size >>= 1;
  1149. } while (size > 256);
  1150. return bits;
  1151. }
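/*
 * Worked example: blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12, matching ilog2() for the power-of-two block
 * sizes this helper is used with.
 */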
  1152. static inline unsigned int block_size(struct block_device *bdev)
  1153. {
  1154. return 1 << bdev->bd_inode->i_blkbits;
  1155. }
  1156. int kblockd_schedule_work(struct work_struct *work);
  1157. int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
  1158. #define MODULE_ALIAS_BLOCKDEV(major,minor) \
  1159. MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
  1160. #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
  1161. MODULE_ALIAS("block-major-" __stringify(major) "-*")
  1162. #ifdef CONFIG_BLK_INLINE_ENCRYPTION
  1163. bool blk_crypto_register(struct blk_crypto_profile *profile,
  1164. struct request_queue *q);
  1165. #else /* CONFIG_BLK_INLINE_ENCRYPTION */
  1166. static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
  1167. struct request_queue *q)
  1168. {
  1169. return true;
  1170. }
  1171. #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
  1172. enum blk_unique_id {
  1173. /* these match the Designator Types specified in SPC */
  1174. BLK_UID_T10 = 1,
  1175. BLK_UID_EUI64 = 2,
  1176. BLK_UID_NAA = 3,
  1177. };
  1178. #define NFL4_UFLG_MASK 0x0000003F
  1179. struct block_device_operations {
  1180. void (*submit_bio)(struct bio *bio);
  1181. int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
  1182. unsigned int flags);
  1183. int (*open) (struct block_device *, fmode_t);
  1184. void (*release) (struct gendisk *, fmode_t);
  1185. int (*rw_page)(struct block_device *, sector_t, struct page *, enum req_op);
  1186. int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
  1187. int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
  1188. unsigned int (*check_events) (struct gendisk *disk,
  1189. unsigned int clearing);
  1190. void (*unlock_native_capacity) (struct gendisk *);
  1191. int (*getgeo)(struct block_device *, struct hd_geometry *);
  1192. int (*set_read_only)(struct block_device *bdev, bool ro);
  1193. void (*free_disk)(struct gendisk *disk);
  1194. /* this callback is with swap_lock and sometimes page table lock held */
  1195. void (*swap_slot_free_notify) (struct block_device *, unsigned long);
  1196. int (*report_zones)(struct gendisk *, sector_t sector,
  1197. unsigned int nr_zones, report_zones_cb cb, void *data);
  1198. char *(*devnode)(struct gendisk *disk, umode_t *mode);
  1199. /* returns the length of the identifier or a negative errno: */
  1200. int (*get_unique_id)(struct gendisk *disk, u8 id[16],
  1201. enum blk_unique_id id_type);
  1202. struct module *owner;
  1203. const struct pr_ops *pr_ops;
  1204. /*
  1205. * Special callback for probing GPT entry at a given sector.
  1206. * Needed by Android devices, used by GPT scanner and MMC blk
  1207. * driver.
  1208. */
  1209. int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
  1210. ANDROID_KABI_RESERVE(1);
  1211. ANDROID_KABI_RESERVE(2);
  1212. };
  1213. #ifdef CONFIG_COMPAT
  1214. extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
  1215. unsigned int, unsigned long);
  1216. #else
  1217. #define blkdev_compat_ptr_ioctl NULL
  1218. #endif
  1219. extern int bdev_read_page(struct block_device *, sector_t, struct page *);
  1220. extern int bdev_write_page(struct block_device *, sector_t, struct page *,
  1221. struct writeback_control *);
  1222. static inline void blk_wake_io_task(struct task_struct *waiter)
  1223. {
  1224. /*
  1225. * If we're polling, the task itself is doing the completions. For
  1226. * that case, we don't need to signal a wakeup, it's enough to just
  1227. * mark us as RUNNING.
  1228. */
  1229. if (waiter == current)
  1230. __set_current_state(TASK_RUNNING);
  1231. else
  1232. wake_up_process(waiter);
  1233. }
  1234. unsigned long bdev_start_io_acct(struct block_device *bdev,
  1235. unsigned int sectors, enum req_op op,
  1236. unsigned long start_time);
  1237. void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
  1238. unsigned long start_time);
  1239. void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
  1240. unsigned long bio_start_io_acct(struct bio *bio);
  1241. void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
  1242. struct block_device *orig_bdev);
  1243. /**
  1244. * bio_end_io_acct - end I/O accounting for bio based drivers
  1245. * @bio: bio to end account for
  1246. * @start_time: start time returned by bio_start_io_acct()
  1247. */
  1248. static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
  1249. {
  1250. return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
  1251. }
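/*
 * Accounting sketch (illustrative; where start_time is stored is up to the
 * driver): bio based drivers bracket each bio with the helpers above so the
 * I/O is reflected in the disk statistics.
 *
 *	submit path:
 *		start_time = bio_start_io_acct(bio);
 *
 *	completion path, before ending the bio:
 *		bio_end_io_acct(bio, start_time);
 */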
  1252. int bdev_read_only(struct block_device *bdev);
  1253. int set_blocksize(struct block_device *bdev, int size);
  1254. int lookup_bdev(const char *pathname, dev_t *dev);
  1255. void blkdev_show(struct seq_file *seqf, off_t offset);
  1256. #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
  1257. #define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
  1258. #ifdef CONFIG_BLOCK
  1259. #define BLKDEV_MAJOR_MAX 512
  1260. #else
  1261. #define BLKDEV_MAJOR_MAX 0
  1262. #endif
  1263. struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
  1264. void *holder);
  1265. struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
  1266. int bd_prepare_to_claim(struct block_device *bdev, void *holder);
  1267. void bd_abort_claiming(struct block_device *bdev, void *holder);
  1268. void blkdev_put(struct block_device *bdev, fmode_t mode);
  1269. /* just for blk-cgroup, don't use elsewhere */
  1270. struct block_device *blkdev_get_no_open(dev_t dev);
  1271. void blkdev_put_no_open(struct block_device *bdev);
  1272. struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
  1273. void bdev_add(struct block_device *bdev, dev_t dev);
  1274. struct block_device *I_BDEV(struct inode *inode);
  1275. int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
  1276. loff_t lend);
  1277. #ifdef CONFIG_BLOCK
  1278. void invalidate_bdev(struct block_device *bdev);
  1279. int sync_blockdev(struct block_device *bdev);
  1280. int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
  1281. int sync_blockdev_nowait(struct block_device *bdev);
  1282. void sync_bdevs(bool wait);
  1283. void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
  1284. void printk_all_partitions(void);
  1285. #else
  1286. static inline void invalidate_bdev(struct block_device *bdev)
  1287. {
  1288. }
  1289. static inline int sync_blockdev(struct block_device *bdev)
  1290. {
  1291. return 0;
  1292. }
  1293. static inline int sync_blockdev_nowait(struct block_device *bdev)
  1294. {
  1295. return 0;
  1296. }
  1297. static inline void sync_bdevs(bool wait)
  1298. {
  1299. }
  1300. static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
  1301. {
  1302. }
  1303. static inline void printk_all_partitions(void)
  1304. {
  1305. }
  1306. #endif /* CONFIG_BLOCK */
  1307. int fsync_bdev(struct block_device *bdev);
  1308. int freeze_bdev(struct block_device *bdev);
  1309. int thaw_bdev(struct block_device *bdev);
  1310. struct io_comp_batch {
  1311. struct request *req_list;
  1312. bool need_ts;
  1313. void (*complete)(struct io_comp_batch *);
  1314. };
  1315. #define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
  1316. #endif /* _LINUX_BLKDEV_H */