// SPDX-License-Identifier: GPL-2.0-only
/*
 *	sd.c Copyright (C) 1992 Drew Eckhardt
 *	     Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *	Linux scsi disk driver
 *		Initial versions: Drew Eckhardt
 *		Subsequent revisions: Eric Youngdale
 *	Modification history:
 *	 - Drew Eckhardt <[email protected]> original
 *	 - Eric Youngdale <[email protected]> add scatter-gather, multiple
 *	   outstanding request, and other enhancements.
 *	   Support loadable low-level scsi drivers.
 *	 - Jirka Hanika <[email protected]> support more scsi disks using
 *	   eight major numbers.
 *	 - Richard Gooch <[email protected]> support devfs.
 *	 - Torben Mathiasen <[email protected]> Resource allocation fixes in
 *	   sd_init and cleanups.
 *	 - Alex Davis <[email protected]> Fix problem where partition info
 *	   not being read in sd_open. Fix problem where removable media
 *	   could be ejected after sd_open.
 *	 - Douglas Gilbert <[email protected]> cleanup for lk 2.5.x
 *	 - Badari Pulavarty <[email protected]>, Matthew Wilcox
 *	   <[email protected]>, Kurt Garloff <[email protected]>:
 *	   Support 32k/1M disks.
 *
 *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
 *	 - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
 *	 - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
 *	 - entering sd_ioctl: SCSI_LOG_IOCTL level 1
 *	 - entering other commands: SCSI_LOG_HLQUEUE level 3
 *	Note: when the logging level is set by the user, it must be greater
 *	than the level indicated above to trigger output.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
#include <linux/pr.h>
#include <linux/t10-pi.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

#include "sd.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

MODULE_AUTHOR("Eric Youngdale");
MODULE_DESCRIPTION("SCSI disk (sd) driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);

#define SD_MINORS	16
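
/*
 * Each disk carves SD_MINORS minor numbers out of its major: minor 0 is
 * the whole device and minors 1-15 address partitions.
 */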

static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume_system(struct device *);
static int sd_resume_runtime(struct device *);
static void sd_rescan(struct device *);
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
static int sd_done(struct scsi_cmnd *);
static void sd_eh_reset(struct scsi_cmnd *);
static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);

static DEFINE_IDA(sd_index_ida);

static struct kmem_cache *sd_cdb_cache;
static mempool_t *sd_page_pool;
static struct lock_class_key sd_bio_compl_lkclass;

static const char *sd_cache_types[] = {
	"write through", "none", "write back",
	"write back, no read (daft)"
};
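
/*
 * The index into sd_cache_types[] encodes the cache bits as
 * (WCE << 1) | RCD; cache_type_show() and cache_type_store() both rely
 * on this ordering.
 */

/*
 * Tell the block layer whether the device has a volatile write cache
 * and whether it honors FUA, so that REQ_PREFLUSH and REQ_FUA requests
 * are translated correctly.
 */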
static void sd_set_flush_flag(struct scsi_disk *sdkp)
{
	bool wc = false, fua = false;

	if (sdkp->WCE) {
		wc = true;
		if (sdkp->DPOFUA)
			fua = true;
	}

	blk_queue_write_cache(sdkp->disk->queue, wc, fua);
}

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	int ct, rcd, wce, sp;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	char buffer[64];
	char *buffer_data;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	static const char temp[] = "temporary ";
	int len;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		/* no cache control on RBC devices; theoretically they
		 * can do it, but there's probably so many exceptions
		 * it's not worth the risk */
		return -EINVAL;

	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
		buf += sizeof(temp) - 1;
		sdkp->cache_override = 1;
	} else {
		sdkp->cache_override = 0;
	}

	ct = sysfs_match_string(sd_cache_types, buf);
	if (ct < 0)
		return -EINVAL;

	rcd = ct & 0x01 ? 1 : 0;
	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;

	if (sdkp->cache_override) {
		sdkp->WCE = wce;
		sdkp->RCD = rcd;
		sd_set_flush_flag(sdkp);
		return count;
	}

	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
			    sdkp->max_retries, &data, NULL))
		return -EINVAL;
	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
		    data.block_descriptor_length);
	buffer_data = buffer + data.header_length +
		      data.block_descriptor_length;
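
	/*
	 * Caching mode page (0x08): WCE is bit 2 and RCD is bit 0 of byte 2
	 * of the page, so clear both bits before setting them from the
	 * requested cache type.
	 */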
	buffer_data[2] &= ~0x05;
	buffer_data[2] |= wce << 2 | rcd;
	sp = buffer_data[0] & 0x80 ? 1 : 0;	/* set SP only if the page is saveable (PS bit) */
	buffer_data[0] &= ~0x80;

	/*
	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
	 * received mode parameter buffer before doing MODE SELECT.
	 */
	data.device_specific = 0;

	if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
			     sdkp->max_retries, &data, &sshdr)) {
		if (scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EINVAL;
	}
	sd_revalidate_disk(sdkp->disk);
	return count;
}

static ssize_t
manage_start_stop_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	return sprintf(buf, "%u\n", sdp->manage_start_stop);
}

static ssize_t
manage_start_stop_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	bool v;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->manage_start_stop = v;

	return count;
}
static DEVICE_ATTR_RW(manage_start_stop);

static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->device->allow_restart);
}

static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	bool v;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->allow_restart = v;

	return count;
}
static DEVICE_ATTR_RW(allow_restart);

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int ct = sdkp->RCD + 2 * sdkp->WCE;

	return sprintf(buf, "%s\n", sd_cache_types[ct]);
}
static DEVICE_ATTR_RW(cache_type);

static ssize_t
FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->DPOFUA);
}
static DEVICE_ATTR_RO(FUA);

static ssize_t
protection_type_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->protection_type);
}

static ssize_t
protection_type_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	unsigned int val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &val);

	if (err)
		return err;

	if (val <= T10_PI_TYPE3_PROTECTION)
		sdkp->protection_type = val;

	return count;
}
static DEVICE_ATTR_RW(protection_type);

static ssize_t
protection_mode_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);

	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
		dif = 0;
		dix = 1;
	}

	if (!dif && !dix)
		return sprintf(buf, "none\n");

	return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
}
static DEVICE_ATTR_RO(protection_mode);

static ssize_t
app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->ATO);
}
static DEVICE_ATTR_RO(app_tag_own);

static ssize_t
thin_provisioning_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->lbpme);
}
static DEVICE_ATTR_RO(thin_provisioning);

/* sysfs_match_string() requires dense arrays */
static const char *lbp_mode[] = {
	[SD_LBP_FULL]		= "full",
	[SD_LBP_UNMAP]		= "unmap",
	[SD_LBP_WS16]		= "writesame_16",
	[SD_LBP_WS10]		= "writesame_10",
	[SD_LBP_ZERO]		= "writesame_zero",
	[SD_LBP_DISABLE]	= "disabled",
};

static ssize_t
provisioning_mode_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
}

static ssize_t
provisioning_mode_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sd_is_zoned(sdkp)) {
		sd_config_discard(sdkp, SD_LBP_DISABLE);
		return count;
	}

	if (sdp->type != TYPE_DISK)
		return -EINVAL;

	mode = sysfs_match_string(lbp_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sd_config_discard(sdkp, mode);

	return count;
}
static DEVICE_ATTR_RW(provisioning_mode);

/* sysfs_match_string() requires dense arrays */
static const char *zeroing_mode[] = {
	[SD_ZERO_WRITE]		= "write",
	[SD_ZERO_WS]		= "writesame",
	[SD_ZERO_WS16_UNMAP]	= "writesame_16_unmap",
	[SD_ZERO_WS10_UNMAP]	= "writesame_10_unmap",
};

static ssize_t
zeroing_mode_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
}

static ssize_t
zeroing_mode_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	mode = sysfs_match_string(zeroing_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sdkp->zeroing_mode = mode;

	return count;
}
static DEVICE_ATTR_RW(zeroing_mode);

static ssize_t
max_medium_access_timeouts_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
}

static ssize_t
max_medium_access_timeouts_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);

	return err ? err : count;
}
static DEVICE_ATTR_RW(max_medium_access_timeouts);

static ssize_t
max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
}

static ssize_t
max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned long max;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	err = kstrtoul(buf, 10, &max);

	if (err)
		return err;

	if (max == 0)
		sdp->no_write_same = 1;
	else if (max <= SD_MAX_WS16_BLOCKS) {
		sdp->no_write_same = 0;
		sdkp->max_ws_blocks = max;
	}

	sd_config_write_same(sdkp);

	return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);

static ssize_t
zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
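
	/*
	 * sdkp->zoned caches the ZONED field of the Block Device
	 * Characteristics VPD page: 1 means host-aware, 2 means
	 * drive-managed. Host-managed devices instead report the
	 * dedicated TYPE_ZBC device type.
	 */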
	if (sdkp->device->type == TYPE_ZBC)
		return sprintf(buf, "host-managed\n");
	if (sdkp->zoned == 1)
		return sprintf(buf, "host-aware\n");
	if (sdkp->zoned == 2)
		return sprintf(buf, "drive-managed\n");
	return sprintf(buf, "none\n");
}
static DEVICE_ATTR_RO(zoned_cap);

static ssize_t
max_retries_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdev = sdkp->device;
	int retries, err;

	err = kstrtoint(buf, 10, &retries);
	if (err)
		return err;

	if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
		sdkp->max_retries = retries;
		return count;
	}

	sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
		    SD_MAX_RETRIES);
	return -EINVAL;
}

static ssize_t
max_retries_show(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%d\n", sdkp->max_retries);
}

static DEVICE_ATTR_RW(max_retries);

static struct attribute *sd_disk_attrs[] = {
	&dev_attr_cache_type.attr,
	&dev_attr_FUA.attr,
	&dev_attr_allow_restart.attr,
	&dev_attr_manage_start_stop.attr,
	&dev_attr_protection_type.attr,
	&dev_attr_protection_mode.attr,
	&dev_attr_app_tag_own.attr,
	&dev_attr_thin_provisioning.attr,
	&dev_attr_provisioning_mode.attr,
	&dev_attr_zeroing_mode.attr,
	&dev_attr_max_write_same_blocks.attr,
	&dev_attr_max_medium_access_timeouts.attr,
	&dev_attr_zoned_cap.attr,
	&dev_attr_max_retries.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sd_disk);

static struct class sd_disk_class = {
	.name		= "scsi_disk",
	.owner		= THIS_MODULE,
	.dev_release	= scsi_disk_release,
	.dev_groups	= sd_disk_groups,
};

static const struct dev_pm_ops sd_pm_ops = {
	.suspend		= sd_suspend_system,
	.resume			= sd_resume_system,
	.poweroff		= sd_suspend_system,
	.restore		= sd_resume_system,
	.runtime_suspend	= sd_suspend_runtime,
	.runtime_resume		= sd_resume_runtime,
};

static struct scsi_driver sd_template = {
	.gendrv = {
		.name		= "sd",
		.owner		= THIS_MODULE,
		.probe		= sd_probe,
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.remove		= sd_remove,
		.shutdown	= sd_shutdown,
		.pm		= &sd_pm_ops,
	},
	.rescan			= sd_rescan,
	.init_command		= sd_init_command,
	.uninit_command		= sd_uninit_command,
	.done			= sd_done,
	.eh_action		= sd_eh_action,
	.eh_reset		= sd_eh_reset,
};

/*
 * Don't request a new module, as that could deadlock in multipath
 * environment.
 */
static void sd_default_probe(dev_t devt)
{
}

/*
 * Device no to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *    31        20 19          8 7  4 3  0
 *
 * Inside a major, we have 16k disks, however mapped non-
 * contiguously. The first 16 disks are for major0, the next
 * ones with major1, ... Disk 256 is for major0 again, disk 272
 * for major1, ...
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-known SCSI majors 8, 65--71, 136--143.
 */
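/*
 * For example, major_idx 0 maps to major 8 (sda..sdp), major_idx 1 to
 * major 65, and major_idx 8 to major 136.
 */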
static int sd_major(int major_idx)
{
	switch (major_idx) {
	case 0:
		return SCSI_DISK0_MAJOR;
	case 1 ... 7:
		return SCSI_DISK1_MAJOR + major_idx - 1;
	case 8 ... 15:
		return SCSI_DISK8_MAJOR + major_idx - 8;
	default:
		BUG();
		return 0;	/* shut up gcc */
	}
}

#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
			 size_t len, bool send)
{
	struct scsi_disk *sdkp = data;
	struct scsi_device *sdev = sdkp->device;
	u8 cdb[12] = { 0, };
	const struct scsi_exec_args exec_args = {
		.req_flags = BLK_MQ_REQ_PM,
	};
	int ret;
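
	/*
	 * SECURITY PROTOCOL IN/OUT CDB (SPC-4): byte 1 carries the security
	 * protocol, bytes 2-3 the protocol-specific field, and bytes 6-9
	 * the allocation/transfer length.
	 */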
	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
	cdb[1] = secp;
	put_unaligned_be16(spsp, &cdb[2]);
	put_unaligned_be32(len, &cdb[6]);

	ret = scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
			       buffer, len, SD_TIMEOUT, sdkp->max_retries,
			       &exec_args);
	return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */

/*
 * Look up the DIX operation based on whether the command is read or
 * write and whether dix and dif are enabled.
 */
static unsigned int sd_prot_op(bool write, bool dix, bool dif)
{
	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
	static const unsigned int ops[] = {	/* wrt dix dif */
		SCSI_PROT_NORMAL,		/*  0	0   0  */
		SCSI_PROT_READ_STRIP,		/*  0	0   1  */
		SCSI_PROT_READ_INSERT,		/*  0	1   0  */
		SCSI_PROT_READ_PASS,		/*  0	1   1  */
		SCSI_PROT_NORMAL,		/*  1	0   0  */
		SCSI_PROT_WRITE_INSERT,		/*  1	0   1  */
		SCSI_PROT_WRITE_STRIP,		/*  1	1   0  */
		SCSI_PROT_WRITE_PASS,		/*  1	1   1  */
	};

	return ops[write << 2 | dix << 1 | dif];
}

/*
 * Returns a mask of the protection flags that are valid for a given DIX
 * operation.
 */
static unsigned int sd_prot_flag_mask(unsigned int prot_op)
{
	static const unsigned int flag_mask[] = {
		[SCSI_PROT_NORMAL]		= 0,

		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,
	};

	return flag_mask[prot_op];
}

static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
					   unsigned int dix, unsigned int dif)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct bio *bio = rq->bio;
	unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
	unsigned int protect = 0;

	if (dix) {				/* DIX Type 0, 1, 2, 3 */
		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
			scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
	}

	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_REF_CHECK;
	}

	if (dif) {				/* DIX/DIF Type 1, 2, 3 */
		scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;

		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
			protect = 3 << 5;	/* Disable target PI checking */
		else
			protect = 1 << 5;	/* Enable target PI checking */
	}

	scsi_set_prot_op(scmd, prot_op);
	scsi_set_prot_type(scmd, dif);
	scmd->prot_flags &= sd_prot_flag_mask(prot_op);

	return protect;
}

static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;
	unsigned int max_blocks = 0;

	q->limits.discard_alignment =
		sdkp->unmap_alignment * logical_block_size;
	q->limits.discard_granularity =
		max(sdkp->physical_block_size,
		    sdkp->unmap_granularity * logical_block_size);
	sdkp->provisioning_mode = mode;

	switch (mode) {

	case SD_LBP_FULL:
	case SD_LBP_DISABLE:
		blk_queue_max_discard_sectors(q, 0);
		return;

	case SD_LBP_UNMAP:
		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
					  (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS16:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS10:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
		break;

	case SD_LBP_ZERO:
		max_blocks = min_not_zero(sdkp->max_ws_blocks,
					  (u32)SD_MAX_WS10_BLOCKS);
		break;
	}
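
	/*
	 * max_blocks is in device logical blocks; the block layer expects
	 * 512-byte sectors, hence the (logical_block_size >> 9) scaling.
	 */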
	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
}

static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int data_len = 24;
	char *buf;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = UNMAP;
	cmd->cmnd[8] = 24;

	buf = bvec_virt(&rq->special_vec);
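	/*
	 * UNMAP parameter list (SBC-3): an 8-byte header followed here by a
	 * single 16-byte block descriptor. Bytes 0-1 hold the data length
	 * (6 + 16), bytes 2-3 the block descriptor data length (16), and the
	 * descriptor carries an 8-byte LBA and a 4-byte block count.
	 */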
	put_unaligned_be16(6 + 16, &buf[0]);
	put_unaligned_be16(16, &buf[2]);
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(nr_blocks, &buf[16]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = SD_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}

static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
					       bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 16;
	cmd->cmnd[0] = WRITE_SAME_16;
	if (unmap)
		cmd->cmnd[1] = 0x8;	/* UNMAP */
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}

static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
					       bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = WRITE_SAME;
	if (unmap)
		cmd->cmnd[1] = 0x8;	/* UNMAP */
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}

static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));

	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
		switch (sdkp->zeroing_mode) {
		case SD_ZERO_WS16_UNMAP:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_ZERO_WS10_UNMAP:
			return sd_setup_write_same10_cmnd(cmd, true);
		}
	}

	if (sdp->no_write_same) {
		rq->rq_flags |= RQF_QUIET;
		return BLK_STS_TARGET;
	}

	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
		return sd_setup_write_same16_cmnd(cmd, false);

	return sd_setup_write_same10_cmnd(cmd, false);
}

static void sd_config_write_same(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;

	if (sdkp->device->no_write_same) {
		sdkp->max_ws_blocks = 0;
		goto out;
	}

	/* Some devices cannot handle block counts above 0xffff despite
	 * supporting WRITE SAME(16). Consequently we default to 64k
	 * blocks per I/O unless the device explicitly advertises a
	 * bigger limit.
	 */
	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS16_BLOCKS);
	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS10_BLOCKS);
	else {
		sdkp->device->no_write_same = 1;
		sdkp->max_ws_blocks = 0;
	}

	if (sdkp->lbprz && sdkp->lbpws)
		sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
	else if (sdkp->lbprz && sdkp->lbpws10)
		sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
	else if (sdkp->max_ws_blocks)
		sdkp->zeroing_mode = SD_ZERO_WS;
	else
		sdkp->zeroing_mode = SD_ZERO_WRITE;

	if (sdkp->max_ws_blocks &&
	    sdkp->physical_block_size > logical_block_size) {
		/*
		 * Reporting a maximum number of blocks that is not aligned
		 * on the device physical size would cause a large write same
		 * request to be split into physically unaligned chunks by
		 * __blkdev_issue_write_zeroes() even if the caller of this
		 * function took care to align the large request. So make sure
		 * the maximum reported is aligned to the device physical block
		 * size. This is only an optional optimization for regular
		 * disks, but this is mandatory to avoid failure of large write
		 * same requests directed at sequential write required zones of
		 * host-managed ZBC disks.
		 */
		sdkp->max_ws_blocks =
			round_down(sdkp->max_ws_blocks,
				   bytes_to_logical(sdkp->device,
						    sdkp->physical_block_size));
	}

out:
	blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
					   (logical_block_size >> 9));
}

static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);

	/* flush requests don't perform I/O, zero the S/G table */
	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
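
	/*
	 * SYNCHRONIZE CACHE(10) with both the LBA and the number of blocks
	 * left zero asks the device to flush cached data for the whole
	 * medium.
	 */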
	cmd->cmnd[0] = SYNCHRONIZE_CACHE;
	cmd->cmd_len = 10;
	cmd->transfersize = 0;
	cmd->allowed = sdkp->max_retries;

	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len = SD_EXT_CDB_SIZE;
	cmd->cmnd[0]  = VARIABLE_LENGTH_CMD;
	cmd->cmnd[7]  = 0x18;	/* Additional CDB len */
	cmd->cmnd[9]  = write ? WRITE_32 : READ_32;
	cmd->cmnd[10] = flags;
	put_unaligned_be64(lba, &cmd->cmnd[12]);
	put_unaligned_be32(lba, &cmd->cmnd[20]);	/* Expected Indirect LBA */
	put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len  = 16;
	cmd->cmnd[0]  = write ? WRITE_16 : READ_16;
	cmd->cmnd[1]  = flags;
	cmd->cmnd[14] = 0;
	cmd->cmnd[15] = 0;
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len = 10;
	cmd->cmnd[0] = write ? WRITE_10 : READ_10;
	cmd->cmnd[1] = flags;
	cmd->cmnd[6] = 0;
	cmd->cmnd[9] = 0;
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	return BLK_STS_OK;
}

static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
				      sector_t lba, unsigned int nr_blocks,
				      unsigned char flags)
{
	/* Avoid a transfer length of 0 being interpreted as 256 blocks. */
	if (WARN_ON_ONCE(nr_blocks == 0))
		return BLK_STS_IOERR;

	if (unlikely(flags & 0x8)) {
		/*
		 * This happens only if this drive failed a 10-byte rw
		 * command with ILLEGAL_REQUEST during operation and
		 * thus turned off use_10_for_rw.
		 */
		scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
		return BLK_STS_IOERR;
	}
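
	/* READ/WRITE(6) carries only a 21-bit LBA and an 8-bit block count. */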
	cmd->cmd_len = 6;
	cmd->cmnd[0] = write ? WRITE_6 : READ_6;
	cmd->cmnd[1] = (lba >> 16) & 0x1f;
	cmd->cmnd[2] = (lba >> 8) & 0xff;
	cmd->cmnd[3] = lba & 0xff;
	cmd->cmnd[4] = nr_blocks;
	cmd->cmnd[5] = 0;

	return BLK_STS_OK;
}

static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	sector_t threshold;
	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
	bool write = rq_data_dir(rq) == WRITE;
	unsigned char protect, fua;
	blk_status_t ret;
	unsigned int dif;
	bool dix;

	ret = scsi_alloc_sgtables(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	ret = BLK_STS_IOERR;
	if (!scsi_device_online(sdp) || sdp->changed) {
		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
		goto fail;
	}

	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
		goto fail;
	}

	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
		goto fail;
	}

	/*
	 * Some SD card readers can't handle accesses which touch the
	 * last one or two logical blocks. Split accesses as needed.
	 */
	threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;

	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
		if (lba < threshold) {
			/* Access up to the threshold but not beyond */
			nr_blocks = threshold - lba;
		} else {
			/* Access only a single logical block */
			nr_blocks = 1;
		}
	}

	if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
		if (ret)
			goto fail;
	}

	fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
	dix = scsi_prot_sg_count(cmd);
	dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);

	if (dif || dix)
		protect = sd_setup_protect_cmnd(cmd, dix, dif);
	else
		protect = 0;
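
	/*
	 * Pick the smallest CDB that can express the request: RW32 when
	 * protection information is used on a DIF Type 2 device, RW16 for
	 * large block counts or devices that require 16-byte CDBs, RW10
	 * when the LBA or count outgrows READ/WRITE(6), and the legacy
	 * 6-byte CDB otherwise.
	 */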
	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
		ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
		   sdp->use_10_for_rw || protect) {
		ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
					 protect | fua);
	} else {
		ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
					protect | fua);
	}

	if (unlikely(ret != BLK_STS_OK))
		goto fail;

	/*
	 * We shouldn't disconnect in the middle of a sector, so with a dumb
	 * host adapter, it's safe to assume that we can at least transfer
	 * this many bytes between each connect / disconnect.
	 */
	cmd->transfersize = sdp->sector_size;
	cmd->underflow = nr_blocks << 9;
	cmd->allowed = sdkp->max_retries;
	cmd->sdb.length = nr_blocks * sdp->sector_size;

	SCSI_LOG_HLQUEUE(1,
			 scmd_printk(KERN_INFO, cmd,
				     "%s: block=%llu, count=%d\n", __func__,
				     (unsigned long long)blk_rq_pos(rq),
				     blk_rq_sectors(rq)));
	SCSI_LOG_HLQUEUE(2,
			 scmd_printk(KERN_INFO, cmd,
				     "%s %d/%u 512 byte blocks.\n",
				     write ? "writing" : "reading", nr_blocks,
				     blk_rq_sectors(rq)));

	/*
	 * This indicates that the command is ready from our end to be queued.
	 */
	return BLK_STS_OK;
fail:
	scsi_free_sgtables(cmd);
	return ret;
}

static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
		switch (scsi_disk(rq->q->disk)->provisioning_mode) {
		case SD_LBP_UNMAP:
			return sd_setup_unmap_cmnd(cmd);
		case SD_LBP_WS16:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_LBP_WS10:
			return sd_setup_write_same10_cmnd(cmd, true);
		case SD_LBP_ZERO:
			return sd_setup_write_same10_cmnd(cmd, false);
		default:
			return BLK_STS_TARGET;
		}
	case REQ_OP_WRITE_ZEROES:
		return sd_setup_write_zeroes_cmnd(cmd);
	case REQ_OP_FLUSH:
		return sd_setup_flush_cmnd(cmd);
	case REQ_OP_READ:
	case REQ_OP_WRITE:
	case REQ_OP_ZONE_APPEND:
		return sd_setup_read_write_cmnd(cmd);
	case REQ_OP_ZONE_RESET:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   false);
	case REQ_OP_ZONE_RESET_ALL:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   true);
	case REQ_OP_ZONE_OPEN:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
	case REQ_OP_ZONE_CLOSE:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
	case REQ_OP_ZONE_FINISH:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_NOTSUPP;
	}
}

static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
	struct request *rq = scsi_cmd_to_rq(SCpnt);

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		mempool_free(rq->special_vec.bv_page, sd_page_pool);
}

static bool sd_need_revalidate(struct block_device *bdev,
			       struct scsi_disk *sdkp)
{
	if (sdkp->device->removable || sdkp->write_prot) {
		if (bdev_check_media_change(bdev))
			return true;
	}

	/*
	 * Force a full rescan after ioctl(BLKRRPART). While the disk state has
	 * nothing to do with partitions, BLKRRPART is used to force a full
	 * revalidate after things like a format for historical reasons.
	 */
	return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
}

/**
 *	sd_open - open a scsi disk device
 *	@bdev: Block device of the scsi disk to open
 *	@mode: FMODE_* mask
 *
 *	Returns 0 if successful. Returns a negated errno value in case
 *	of error.
 *
 *	Note: This can be called from a user context (e.g. fsck(1) )
 *	or from within the kernel (e.g. as a result of a mount(1) ).
 *	In the latter case the open does not originate from a user-space
 *	file descriptor, so less context is available.
 *
 *	Locking: called with bdev->bd_disk->open_mutex held.
 **/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdev = sdkp->device;
	int retval;

	if (scsi_device_get(sdev))
		return -ENXIO;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));

	/*
	 * If the device is in error recovery, wait until it is done.
	 * If the device is offline, then disallow any access to it.
	 */
	retval = -ENXIO;
	if (!scsi_block_when_processing_errors(sdev))
		goto error_out;

	if (sd_need_revalidate(bdev, sdkp))
		sd_revalidate_disk(bdev->bd_disk);

	/*
	 * If the drive is empty, just let the open fail.
	 */
	retval = -ENOMEDIUM;
	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
		goto error_out;

	/*
	 * If the device has the write protect tab set, have the open fail
	 * if the user expects to be able to write to the thing.
	 */
	retval = -EROFS;
	if (sdkp->write_prot && (mode & FMODE_WRITE))
		goto error_out;

	/*
	 * It is possible that the disk changing stuff resulted in
	 * the device being taken offline. If this is the case,
	 * report this to the user, and don't pretend that the
	 * open actually succeeded.
	 */
	retval = -ENXIO;
	if (!scsi_device_online(sdev))
		goto error_out;

	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
	}

	return 0;

error_out:
	scsi_device_put(sdev);
	return retval;
}

/**
 *	sd_release - invoked when the (last) close(2) is called on this
 *	scsi disk.
 *	@disk: disk to release
 *	@mode: FMODE_* mask
 *
 *	Returns 0.
 *
 *	Note: may block (uninterruptible) if error recovery is underway
 *	on this disk.
 *
 *	Locking: called with bdev->bd_disk->open_mutex held.
 **/
static void sd_release(struct gendisk *disk, fmode_t mode)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));

	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
	}

	scsi_device_put(sdev);
}

static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdp = sdkp->device;
	struct Scsi_Host *host = sdp->host;
	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
	int diskinfo[4];

	/* default to most commonly used values */
	diskinfo[0] = 0x40;	/* 1 << 6 */
	diskinfo[1] = 0x20;	/* 1 << 5 */
  1192. diskinfo[2] = capacity >> 11;
  1193. /* override with calculated, extended default, or driver values */
  1194. if (host->hostt->bios_param)
  1195. host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
  1196. else
  1197. scsicam_bios_param(bdev, capacity, diskinfo);
  1198. geo->heads = diskinfo[0];
  1199. geo->sectors = diskinfo[1];
  1200. geo->cylinders = diskinfo[2];
  1201. return 0;
  1202. }
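/*
 * Illustrative sketch (not part of the driver): the geometry computed by
 * sd_getgeo() is what userspace sees via HDIO_GETGEO. With the default
 * fake geometry of 64 heads and 32 sectors per track, the cylinder count
 * works out to capacity >> 11.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/hdreg.h>
 *
 *	int print_geo(const char *dev)	// e.g. "/dev/sda" (example path)
 *	{
 *		struct hd_geometry geo;
 *		int fd = open(dev, O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, HDIO_GETGEO, &geo) == 0)
 *			printf("%u heads, %u sectors/track, %u cylinders\n",
 *			       geo.heads, geo.sectors, geo.cylinders);
 *		close(fd);
 *		return 0;
 *	}
 */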
  1203. /**
  1204. * sd_ioctl - process an ioctl
  1205. * @bdev: target block device
  1206. * @mode: FMODE_* mask
  1207. * @cmd: ioctl command number
  1208. * @arg: this is third argument given to ioctl(2) system call.
  1209. * Often contains a pointer.
  1210. *
  1211. * Returns 0 if successful (some ioctls return positive numbers on
  1212. * success as well). Returns a negated errno value in case of error.
  1213. *
 * Note: most ioctls are forwarded to the block subsystem or
 * further down to the SCSI subsystem.
  1216. **/
  1217. static int sd_ioctl(struct block_device *bdev, fmode_t mode,
  1218. unsigned int cmd, unsigned long arg)
  1219. {
  1220. struct gendisk *disk = bdev->bd_disk;
  1221. struct scsi_disk *sdkp = scsi_disk(disk);
  1222. struct scsi_device *sdp = sdkp->device;
  1223. void __user *p = (void __user *)arg;
  1224. int error;
  1225. SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
  1226. "cmd=0x%x\n", disk->disk_name, cmd));
  1227. if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
  1228. return -ENOIOCTLCMD;
  1229. /*
  1230. * If we are in the middle of error recovery, don't let anyone
  1231. * else try and use this device. Also, if error recovery fails, it
  1232. * may try and take the device offline, in which case all further
  1233. * access to the device is prohibited.
  1234. */
  1235. error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
  1236. (mode & FMODE_NDELAY) != 0);
  1237. if (error)
  1238. return error;
  1239. if (is_sed_ioctl(cmd))
  1240. return sed_ioctl(sdkp->opal_dev, cmd, p);
  1241. return scsi_ioctl(sdp, mode, cmd, p);
  1242. }
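/*
 * Illustrative sketch (not part of the driver): ioctls that sd does not
 * consume itself, e.g. SG_IO pass-through, fall through to scsi_ioctl()
 * above. A minimal TEST UNIT READY sent through SG_IO:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	int test_unit_ready(int fd)	// fd: open block device
 *	{
 *		unsigned char cdb[6] = { 0 };	// TEST UNIT READY, opcode 0x00
 *		unsigned char sense[32];
 *		struct sg_io_hdr io;
 *
 *		memset(&io, 0, sizeof(io));
 *		io.interface_id = 'S';
 *		io.dxfer_direction = SG_DXFER_NONE;
 *		io.cmd_len = sizeof(cdb);
 *		io.cmdp = cdb;
 *		io.mx_sb_len = sizeof(sense);
 *		io.sbp = sense;
 *		io.timeout = 10000;		// milliseconds
 *		return ioctl(fd, SG_IO, &io);
 *	}
 */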
  1243. static void set_media_not_present(struct scsi_disk *sdkp)
  1244. {
  1245. if (sdkp->media_present)
  1246. sdkp->device->changed = 1;
  1247. if (sdkp->device->removable) {
  1248. sdkp->media_present = 0;
  1249. sdkp->capacity = 0;
  1250. }
  1251. }
  1252. static int media_not_present(struct scsi_disk *sdkp,
  1253. struct scsi_sense_hdr *sshdr)
  1254. {
  1255. if (!scsi_sense_valid(sshdr))
  1256. return 0;
  1257. /* not invoked for commands that could return deferred errors */
  1258. switch (sshdr->sense_key) {
  1259. case UNIT_ATTENTION:
  1260. case NOT_READY:
  1261. /* medium not present */
  1262. if (sshdr->asc == 0x3A) {
  1263. set_media_not_present(sdkp);
  1264. return 1;
  1265. }
  1266. }
  1267. return 0;
  1268. }
  1269. /**
  1270. * sd_check_events - check media events
  1271. * @disk: kernel device descriptor
  1272. * @clearing: disk events currently being cleared
  1273. *
  1274. * Returns mask of DISK_EVENT_*.
  1275. *
  1276. * Note: this function is invoked from the block subsystem.
  1277. **/
  1278. static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
  1279. {
  1280. struct scsi_disk *sdkp = disk->private_data;
  1281. struct scsi_device *sdp;
  1282. int retval;
  1283. bool disk_changed;
  1284. if (!sdkp)
  1285. return 0;
  1286. sdp = sdkp->device;
  1287. SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
  1288. /*
  1289. * If the device is offline, don't send any commands - just pretend as
  1290. * if the command failed. If the device ever comes back online, we
  1291. * can deal with it then. It is only because of unrecoverable errors
  1292. * that we would ever take a device offline in the first place.
  1293. */
  1294. if (!scsi_device_online(sdp)) {
  1295. set_media_not_present(sdkp);
  1296. goto out;
  1297. }
	/*
	 * Using TEST_UNIT_READY enables differentiation between drive with
	 * no cartridge loaded - NOT READY, drive with changed cartridge -
	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
	 *
	 * Drives that auto spin down, e.g. the iomega jaz 1G, will be
	 * started by sd_spinup_disk(), which runs whenever
	 * sd_revalidate_disk() is called.
	 */
  1307. if (scsi_block_when_processing_errors(sdp)) {
  1308. struct scsi_sense_hdr sshdr = { 0, };
  1309. retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
  1310. &sshdr);
  1311. /* failed to execute TUR, assume media not present */
  1312. if (retval < 0 || host_byte(retval)) {
  1313. set_media_not_present(sdkp);
  1314. goto out;
  1315. }
  1316. if (media_not_present(sdkp, &sshdr))
  1317. goto out;
  1318. }
	/*
	 * For removable SCSI disks we have to recognise the presence
	 * of media in the drive.
	 */
  1323. if (!sdkp->media_present)
  1324. sdp->changed = 1;
  1325. sdkp->media_present = 1;
  1326. out:
  1327. /*
  1328. * sdp->changed is set under the following conditions:
  1329. *
  1330. * Medium present state has changed in either direction.
  1331. * Device has indicated UNIT_ATTENTION.
  1332. */
  1333. disk_changed = sdp->changed;
  1334. sdp->changed = 0;
  1335. return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
  1336. }
  1337. static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
  1338. {
  1339. int retries, res;
  1340. struct scsi_device *sdp = sdkp->device;
  1341. const int timeout = sdp->request_queue->rq_timeout
  1342. * SD_FLUSH_TIMEOUT_MULTIPLIER;
  1343. struct scsi_sense_hdr my_sshdr;
  1344. const struct scsi_exec_args exec_args = {
  1345. .req_flags = BLK_MQ_REQ_PM,
  1346. /* caller might not be interested in sense, but we need it */
  1347. .sshdr = sshdr ? : &my_sshdr,
  1348. };
  1349. if (!scsi_device_online(sdp))
  1350. return -ENODEV;
  1351. sshdr = exec_args.sshdr;
  1352. for (retries = 3; retries > 0; --retries) {
  1353. unsigned char cmd[10] = { 0 };
  1354. cmd[0] = SYNCHRONIZE_CACHE;
  1355. /*
  1356. * Leave the rest of the command zero to indicate
  1357. * flush everything.
  1358. */
  1359. res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
  1360. timeout, sdkp->max_retries, &exec_args);
  1361. if (res == 0)
  1362. break;
  1363. }
  1364. if (res) {
  1365. sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
  1366. if (res < 0)
  1367. return res;
  1368. if (scsi_status_is_check_condition(res) &&
  1369. scsi_sense_valid(sshdr)) {
  1370. sd_print_sense_hdr(sdkp, sshdr);
  1371. /* we need to evaluate the error return */
  1372. if (sshdr->asc == 0x3a || /* medium not present */
  1373. sshdr->asc == 0x20 || /* invalid command */
  1374. (sshdr->asc == 0x74 && sshdr->ascq == 0x71)) /* drive is password locked */
  1375. /* this is no error here */
  1376. return 0;
  1377. }
  1378. switch (host_byte(res)) {
  1379. /* ignore errors due to racing a disconnection */
  1380. case DID_BAD_TARGET:
  1381. case DID_NO_CONNECT:
  1382. return 0;
  1383. /* signal the upper layer it might try again */
  1384. case DID_BUS_BUSY:
  1385. case DID_IMM_RETRY:
  1386. case DID_REQUEUE:
  1387. case DID_SOFT_ERROR:
  1388. return -EBUSY;
  1389. default:
  1390. return -EIO;
  1391. }
  1392. }
  1393. return 0;
  1394. }
  1395. static void sd_rescan(struct device *dev)
  1396. {
  1397. struct scsi_disk *sdkp = dev_get_drvdata(dev);
  1398. sd_revalidate_disk(sdkp->disk);
  1399. }
  1400. static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
  1401. enum blk_unique_id type)
  1402. {
  1403. struct scsi_device *sdev = scsi_disk(disk)->device;
  1404. const struct scsi_vpd *vpd;
  1405. const unsigned char *d;
  1406. int ret = -ENXIO, len;
  1407. rcu_read_lock();
  1408. vpd = rcu_dereference(sdev->vpd_pg83);
  1409. if (!vpd)
  1410. goto out_unlock;
  1411. ret = -EINVAL;
  1412. for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
  1413. /* we only care about designators with LU association */
  1414. if (((d[1] >> 4) & 0x3) != 0x00)
  1415. continue;
  1416. if ((d[1] & 0xf) != type)
  1417. continue;
  1418. /*
  1419. * Only exit early if a 16-byte descriptor was found. Otherwise
  1420. * keep looking as one with more entropy might still show up.
  1421. */
  1422. len = d[3];
  1423. if (len != 8 && len != 12 && len != 16)
  1424. continue;
  1425. ret = len;
  1426. memcpy(id, d + 4, len);
  1427. if (len == 16)
  1428. break;
  1429. }
  1430. out_unlock:
  1431. rcu_read_unlock();
  1432. return ret;
  1433. }
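/*
 * For reference (the SPC layout relied upon by the loop above): each VPD
 * page 0x83 designation descriptor starts with a 4-byte header:
 *
 *	byte 0: protocol identifier (bits 7:4), code set (bits 3:0)
 *	byte 1: PIV (bit 7), association (bits 5:4), designator type (bits 3:0)
 *	byte 2: reserved
 *	byte 3: designator length (number of identifier bytes that follow)
 *
 * A standalone decoder for one raw descriptor might look like this
 * (sketch only; the struct and function names are made up):
 *
 *	struct designator {
 *		unsigned char assoc;	// 0 = LU, 1 = target port, 2 = target
 *		unsigned char type;	// e.g. 2 = EUI-64, 3 = NAA
 *		unsigned char len;	// 8, 12 or 16 for the types used here
 *		const unsigned char *id;
 *	};
 *
 *	static void parse_desc(const unsigned char *d, struct designator *out)
 *	{
 *		out->assoc = (d[1] >> 4) & 0x3;
 *		out->type = d[1] & 0xf;
 *		out->len = d[3];
 *		out->id = d + 4;
 *	}
 */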
  1434. static char sd_pr_type(enum pr_type type)
  1435. {
  1436. switch (type) {
  1437. case PR_WRITE_EXCLUSIVE:
  1438. return 0x01;
  1439. case PR_EXCLUSIVE_ACCESS:
  1440. return 0x03;
  1441. case PR_WRITE_EXCLUSIVE_REG_ONLY:
  1442. return 0x05;
  1443. case PR_EXCLUSIVE_ACCESS_REG_ONLY:
  1444. return 0x06;
  1445. case PR_WRITE_EXCLUSIVE_ALL_REGS:
  1446. return 0x07;
  1447. case PR_EXCLUSIVE_ACCESS_ALL_REGS:
  1448. return 0x08;
  1449. default:
  1450. return 0;
  1451. }
}
  1453. static int sd_pr_command(struct block_device *bdev, u8 sa,
  1454. u64 key, u64 sa_key, u8 type, u8 flags)
  1455. {
  1456. struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
  1457. struct scsi_device *sdev = sdkp->device;
  1458. struct scsi_sense_hdr sshdr;
  1459. const struct scsi_exec_args exec_args = {
  1460. .sshdr = &sshdr,
  1461. };
  1462. int result;
  1463. u8 cmd[16] = { 0, };
  1464. u8 data[24] = { 0, };
  1465. cmd[0] = PERSISTENT_RESERVE_OUT;
  1466. cmd[1] = sa;
  1467. cmd[2] = type;
  1468. put_unaligned_be32(sizeof(data), &cmd[5]);
  1469. put_unaligned_be64(key, &data[0]);
  1470. put_unaligned_be64(sa_key, &data[8]);
  1471. data[20] = flags;
  1472. result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, &data,
  1473. sizeof(data), SD_TIMEOUT, sdkp->max_retries,
  1474. &exec_args);
  1475. if (scsi_status_is_check_condition(result) &&
  1476. scsi_sense_valid(&sshdr)) {
  1477. sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
  1478. scsi_print_sense_hdr(sdev, NULL, &sshdr);
  1479. }
  1480. return result;
  1481. }
  1482. static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
  1483. u32 flags)
  1484. {
  1485. if (flags & ~PR_FL_IGNORE_KEY)
  1486. return -EOPNOTSUPP;
  1487. return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
  1488. old_key, new_key, 0,
  1489. (1 << 0) /* APTPL */);
  1490. }
  1491. static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
  1492. u32 flags)
  1493. {
  1494. if (flags)
  1495. return -EOPNOTSUPP;
  1496. return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
  1497. }
  1498. static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
  1499. {
  1500. return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
  1501. }
  1502. static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
  1503. enum pr_type type, bool abort)
  1504. {
  1505. return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
  1506. sd_pr_type(type), 0);
  1507. }
  1508. static int sd_pr_clear(struct block_device *bdev, u64 key)
  1509. {
  1510. return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
  1511. }
  1512. static const struct pr_ops sd_pr_ops = {
  1513. .pr_register = sd_pr_register,
  1514. .pr_reserve = sd_pr_reserve,
  1515. .pr_release = sd_pr_release,
  1516. .pr_preempt = sd_pr_preempt,
  1517. .pr_clear = sd_pr_clear,
  1518. };
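/*
 * Illustrative sketch (not part of the driver): these pr_ops back the
 * generic persistent reservation ioctls declared in <linux/pr.h>.
 * Registering a key and then taking a write-exclusive reservation from
 * userspace might look like this:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/pr.h>
 *
 *	int reserve_disk(int fd, __u64 key)	// fd: open block device
 *	{
 *		struct pr_registration reg = { .old_key = 0, .new_key = key };
 *		struct pr_reservation rsv = { .key = key,
 *					      .type = PR_WRITE_EXCLUSIVE };
 *
 *		if (ioctl(fd, IOC_PR_REGISTER, &reg))	// -> sd_pr_register()
 *			return -1;
 *		return ioctl(fd, IOC_PR_RESERVE, &rsv);	// -> sd_pr_reserve()
 *	}
 */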
  1519. static void scsi_disk_free_disk(struct gendisk *disk)
  1520. {
  1521. struct scsi_disk *sdkp = scsi_disk(disk);
  1522. put_device(&sdkp->disk_dev);
  1523. }
  1524. static const struct block_device_operations sd_fops = {
  1525. .owner = THIS_MODULE,
  1526. .open = sd_open,
  1527. .release = sd_release,
  1528. .ioctl = sd_ioctl,
  1529. .getgeo = sd_getgeo,
  1530. .compat_ioctl = blkdev_compat_ptr_ioctl,
  1531. .check_events = sd_check_events,
  1532. .unlock_native_capacity = sd_unlock_native_capacity,
  1533. .report_zones = sd_zbc_report_zones,
  1534. .get_unique_id = sd_get_unique_id,
  1535. .free_disk = scsi_disk_free_disk,
  1536. .pr_ops = &sd_pr_ops,
  1537. };
  1538. /**
  1539. * sd_eh_reset - reset error handling callback
  1540. * @scmd: sd-issued command that has failed
  1541. *
 * This function is called by the SCSI midlayer before starting
 * SCSI EH. When counting medium access failures we have to be
 * careful to register it only once per device and SCSI EH run;
 * there might be several timed-out commands which would otherwise
 * cause the 'max_medium_access_timeouts' counter to trigger after
 * the first SCSI EH run already and set the device to offline.
 * So this function resets the internal counter before starting SCSI EH.
  1549. **/
  1550. static void sd_eh_reset(struct scsi_cmnd *scmd)
  1551. {
  1552. struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
  1553. /* New SCSI EH run, reset gate variable */
  1554. sdkp->ignore_medium_access_errors = false;
  1555. }
  1556. /**
  1557. * sd_eh_action - error handling callback
  1558. * @scmd: sd-issued command that has failed
  1559. * @eh_disp: The recovery disposition suggested by the midlayer
  1560. *
  1561. * This function is called by the SCSI midlayer upon completion of an
  1562. * error test command (currently TEST UNIT READY). The result of sending
 * the eh command is passed in eh_disp. We're looking for devices that
 * fail medium access commands but are OK with non-access commands like
 * TEST UNIT READY (and so would wrongly appear to have recovered
 * successfully).
  1567. **/
  1568. static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
  1569. {
  1570. struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
  1571. struct scsi_device *sdev = scmd->device;
  1572. if (!scsi_device_online(sdev) ||
  1573. !scsi_medium_access_command(scmd) ||
  1574. host_byte(scmd->result) != DID_TIME_OUT ||
  1575. eh_disp != SUCCESS)
  1576. return eh_disp;
	/*
	 * The device has timed out executing a medium access command.
	 * However, the TEST UNIT READY command sent during error
	 * handling completed successfully. Either the device is in the
	 * process of recovering or it has suffered an internal failure
	 * that prevents access to the storage medium.
	 */
  1584. if (!sdkp->ignore_medium_access_errors) {
  1585. sdkp->medium_access_timed_out++;
  1586. sdkp->ignore_medium_access_errors = true;
  1587. }
  1588. /*
  1589. * If the device keeps failing read/write commands but TEST UNIT
  1590. * READY always completes successfully we assume that medium
  1591. * access is no longer possible and take the device offline.
  1592. */
  1593. if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
  1594. scmd_printk(KERN_ERR, scmd,
  1595. "Medium access timeout failure. Offlining disk!\n");
  1596. mutex_lock(&sdev->state_mutex);
  1597. scsi_device_set_state(sdev, SDEV_OFFLINE);
  1598. mutex_unlock(&sdev->state_mutex);
  1599. return SUCCESS;
  1600. }
  1601. return eh_disp;
  1602. }
  1603. static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
  1604. {
  1605. struct request *req = scsi_cmd_to_rq(scmd);
  1606. struct scsi_device *sdev = scmd->device;
  1607. unsigned int transferred, good_bytes;
  1608. u64 start_lba, end_lba, bad_lba;
  1609. /*
  1610. * Some commands have a payload smaller than the device logical
  1611. * block size (e.g. INQUIRY on a 4K disk).
  1612. */
  1613. if (scsi_bufflen(scmd) <= sdev->sector_size)
  1614. return 0;
  1615. /* Check if we have a 'bad_lba' information */
  1616. if (!scsi_get_sense_info_fld(scmd->sense_buffer,
  1617. SCSI_SENSE_BUFFERSIZE,
  1618. &bad_lba))
  1619. return 0;
  1620. /*
  1621. * If the bad lba was reported incorrectly, we have no idea where
  1622. * the error is.
  1623. */
  1624. start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
  1625. end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
  1626. if (bad_lba < start_lba || bad_lba >= end_lba)
  1627. return 0;
  1628. /*
  1629. * resid is optional but mostly filled in. When it's unused,
  1630. * its value is zero, so we assume the whole buffer transferred
  1631. */
  1632. transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
  1633. /* This computation should always be done in terms of the
  1634. * resolution of the device's medium.
  1635. */
  1636. good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);
  1637. return min(good_bytes, transferred);
  1638. }
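/*
 * Worked example for sd_completed_bytes() (illustrative numbers): a
 * 32 KiB read starting at 512-byte sector 8000 on a 4096-byte-block
 * device spans logical blocks 1000..1007. If the device reports
 * bad_lba = 1003, good_bytes = (1003 - 1000) * 4096 = 12288, i.e. the
 * first 12 KiB completed before the medium error was hit.
 */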
  1639. /**
  1640. * sd_done - bottom half handler: called when the lower level
  1641. * driver has completed (successfully or otherwise) a scsi command.
  1642. * @SCpnt: mid-level's per command structure.
  1643. *
  1644. * Note: potentially run from within an ISR. Must not block.
  1645. **/
  1646. static int sd_done(struct scsi_cmnd *SCpnt)
  1647. {
  1648. int result = SCpnt->result;
  1649. unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
  1650. unsigned int sector_size = SCpnt->device->sector_size;
  1651. unsigned int resid;
  1652. struct scsi_sense_hdr sshdr;
  1653. struct request *req = scsi_cmd_to_rq(SCpnt);
  1654. struct scsi_disk *sdkp = scsi_disk(req->q->disk);
  1655. int sense_valid = 0;
  1656. int sense_deferred = 0;
  1657. switch (req_op(req)) {
  1658. case REQ_OP_DISCARD:
  1659. case REQ_OP_WRITE_ZEROES:
  1660. case REQ_OP_ZONE_RESET:
  1661. case REQ_OP_ZONE_RESET_ALL:
  1662. case REQ_OP_ZONE_OPEN:
  1663. case REQ_OP_ZONE_CLOSE:
  1664. case REQ_OP_ZONE_FINISH:
  1665. if (!result) {
  1666. good_bytes = blk_rq_bytes(req);
  1667. scsi_set_resid(SCpnt, 0);
  1668. } else {
  1669. good_bytes = 0;
  1670. scsi_set_resid(SCpnt, blk_rq_bytes(req));
  1671. }
  1672. break;
  1673. default:
  1674. /*
  1675. * In case of bogus fw or device, we could end up having
  1676. * an unaligned partial completion. Check this here and force
  1677. * alignment.
  1678. */
  1679. resid = scsi_get_resid(SCpnt);
  1680. if (resid & (sector_size - 1)) {
  1681. sd_printk(KERN_INFO, sdkp,
  1682. "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
  1683. resid, sector_size);
  1684. scsi_print_command(SCpnt);
  1685. resid = min(scsi_bufflen(SCpnt),
  1686. round_up(resid, sector_size));
  1687. scsi_set_resid(SCpnt, resid);
  1688. }
  1689. }
  1690. if (result) {
  1691. sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
  1692. if (sense_valid)
  1693. sense_deferred = scsi_sense_is_deferred(&sshdr);
  1694. }
  1695. sdkp->medium_access_timed_out = 0;
  1696. if (!scsi_status_is_check_condition(result) &&
  1697. (!sense_valid || sense_deferred))
  1698. goto out;
  1699. switch (sshdr.sense_key) {
  1700. case HARDWARE_ERROR:
  1701. case MEDIUM_ERROR:
  1702. good_bytes = sd_completed_bytes(SCpnt);
  1703. break;
  1704. case RECOVERED_ERROR:
  1705. good_bytes = scsi_bufflen(SCpnt);
  1706. break;
  1707. case NO_SENSE:
  1708. /* This indicates a false check condition, so ignore it. An
  1709. * unknown amount of data was transferred so treat it as an
  1710. * error.
  1711. */
  1712. SCpnt->result = 0;
  1713. memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
  1714. break;
  1715. case ABORTED_COMMAND:
  1716. if (sshdr.asc == 0x10) /* DIF: Target detected corruption */
  1717. good_bytes = sd_completed_bytes(SCpnt);
  1718. break;
  1719. case ILLEGAL_REQUEST:
  1720. switch (sshdr.asc) {
  1721. case 0x10: /* DIX: Host detected corruption */
  1722. good_bytes = sd_completed_bytes(SCpnt);
  1723. break;
  1724. case 0x20: /* INVALID COMMAND OPCODE */
  1725. case 0x24: /* INVALID FIELD IN CDB */
  1726. switch (SCpnt->cmnd[0]) {
  1727. case UNMAP:
  1728. sd_config_discard(sdkp, SD_LBP_DISABLE);
  1729. break;
  1730. case WRITE_SAME_16:
  1731. case WRITE_SAME:
  1732. if (SCpnt->cmnd[1] & 8) { /* UNMAP */
  1733. sd_config_discard(sdkp, SD_LBP_DISABLE);
  1734. } else {
  1735. sdkp->device->no_write_same = 1;
  1736. sd_config_write_same(sdkp);
  1737. req->rq_flags |= RQF_QUIET;
  1738. }
  1739. break;
  1740. }
  1741. }
  1742. break;
  1743. default:
  1744. break;
  1745. }
  1746. out:
  1747. if (sd_is_zoned(sdkp))
  1748. good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
  1749. SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
  1750. "sd_done: completed %d of %d bytes\n",
  1751. good_bytes, scsi_bufflen(SCpnt)));
  1752. return good_bytes;
  1753. }
  1754. /*
  1755. * spinup disk - called only in sd_revalidate_disk()
  1756. */
  1757. static void
  1758. sd_spinup_disk(struct scsi_disk *sdkp)
  1759. {
  1760. unsigned char cmd[10];
  1761. unsigned long spintime_expire = 0;
  1762. int retries, spintime;
  1763. unsigned int the_result;
  1764. struct scsi_sense_hdr sshdr;
  1765. const struct scsi_exec_args exec_args = {
  1766. .sshdr = &sshdr,
  1767. };
  1768. int sense_valid = 0;
  1769. spintime = 0;
	/*
	 * Spin up drives, as required. Only done at boot time,
	 * and again on module load.
	 */
  1772. do {
  1773. retries = 0;
  1774. do {
  1775. bool media_was_present = sdkp->media_present;
  1776. cmd[0] = TEST_UNIT_READY;
  1777. memset((void *) &cmd[1], 0, 9);
  1778. the_result = scsi_execute_cmd(sdkp->device, cmd,
  1779. REQ_OP_DRV_IN, NULL, 0,
  1780. SD_TIMEOUT,
  1781. sdkp->max_retries,
  1782. &exec_args);
  1783. /*
  1784. * If the drive has indicated to us that it
  1785. * doesn't have any media in it, don't bother
  1786. * with any more polling.
  1787. */
  1788. if (media_not_present(sdkp, &sshdr)) {
  1789. if (media_was_present)
  1790. sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
  1791. return;
  1792. }
  1793. if (the_result)
  1794. sense_valid = scsi_sense_valid(&sshdr);
  1795. retries++;
  1796. } while (retries < 3 &&
  1797. (!scsi_status_is_good(the_result) ||
  1798. (scsi_status_is_check_condition(the_result) &&
  1799. sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
  1800. if (!scsi_status_is_check_condition(the_result)) {
  1801. /* no sense, TUR either succeeded or failed
  1802. * with a status error */
			if (!spintime && !scsi_status_is_good(the_result)) {
  1804. sd_print_result(sdkp, "Test Unit Ready failed",
  1805. the_result);
  1806. }
  1807. break;
  1808. }
  1809. /*
  1810. * The device does not want the automatic start to be issued.
  1811. */
  1812. if (sdkp->device->no_start_on_add)
  1813. break;
  1814. if (sense_valid && sshdr.sense_key == NOT_READY) {
  1815. if (sshdr.asc == 4 && sshdr.ascq == 3)
  1816. break; /* manual intervention required */
  1817. if (sshdr.asc == 4 && sshdr.ascq == 0xb)
  1818. break; /* standby */
  1819. if (sshdr.asc == 4 && sshdr.ascq == 0xc)
  1820. break; /* unavailable */
  1821. if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
  1822. break; /* sanitize in progress */
  1823. /*
  1824. * Issue command to spin up drive when not ready
  1825. */
  1826. if (!spintime) {
  1827. sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
  1828. cmd[0] = START_STOP;
  1829. cmd[1] = 1; /* Return immediately */
  1830. memset((void *) &cmd[2], 0, 8);
  1831. cmd[4] = 1; /* Start spin cycle */
  1832. if (sdkp->device->start_stop_pwr_cond)
  1833. cmd[4] |= 1 << 4;
  1834. scsi_execute_cmd(sdkp->device, cmd,
  1835. REQ_OP_DRV_IN, NULL, 0,
  1836. SD_TIMEOUT, sdkp->max_retries,
  1837. &exec_args);
  1838. spintime_expire = jiffies + 100 * HZ;
  1839. spintime = 1;
  1840. }
  1841. /* Wait 1 second for next try */
  1842. msleep(1000);
  1843. printk(KERN_CONT ".");
  1844. /*
  1845. * Wait for USB flash devices with slow firmware.
  1846. * Yes, this sense key/ASC combination shouldn't
  1847. * occur here. It's characteristic of these devices.
  1848. */
  1849. } else if (sense_valid &&
  1850. sshdr.sense_key == UNIT_ATTENTION &&
  1851. sshdr.asc == 0x28) {
  1852. if (!spintime) {
  1853. spintime_expire = jiffies + 5 * HZ;
  1854. spintime = 1;
  1855. }
  1856. /* Wait 1 second for next try */
  1857. msleep(1000);
  1858. } else {
  1859. /* we don't understand the sense code, so it's
  1860. * probably pointless to loop */
			if (!spintime) {
  1862. sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
  1863. sd_print_sense_hdr(sdkp, &sshdr);
  1864. }
  1865. break;
  1866. }
  1867. } while (spintime && time_before_eq(jiffies, spintime_expire));
  1868. if (spintime) {
  1869. if (scsi_status_is_good(the_result))
  1870. printk(KERN_CONT "ready\n");
  1871. else
  1872. printk(KERN_CONT "not responding...\n");
  1873. }
  1874. }
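/*
 * For reference, the START STOP UNIT CDB built above (SBC):
 *
 *	byte 0: 0x1b (START STOP UNIT)
 *	byte 1: 0x01 (IMMED - return before the spin-up completes)
 *	byte 4: 0x01 (START), or 0x11 when start_stop_pwr_cond is set,
 *		i.e. POWER CONDITION = ACTIVE in bits 7:4
 */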
  1875. /*
  1876. * Determine whether disk supports Data Integrity Field.
  1877. */
  1878. static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
  1879. {
  1880. struct scsi_device *sdp = sdkp->device;
  1881. u8 type;
  1882. if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
  1883. sdkp->protection_type = 0;
  1884. return 0;
  1885. }
  1886. type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
  1887. if (type > T10_PI_TYPE3_PROTECTION) {
  1888. sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
  1889. " protection type %u. Disabling disk!\n",
  1890. type);
  1891. sdkp->protection_type = 0;
  1892. return -ENODEV;
  1893. }
  1894. sdkp->protection_type = type;
  1895. return 0;
  1896. }
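/*
 * Worked example (illustrative): byte 12 of the READ CAPACITY(16)
 * response carries P_TYPE in bits 3:1 and PROT_EN in bit 0. For
 * buffer[12] = 0x03, PROT_EN = 1 and P_TYPE = 1, so
 * type = ((0x03 >> 1) & 7) + 1 = 2: the disk is formatted with
 * Type 2 protection.
 */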
  1897. static void sd_config_protection(struct scsi_disk *sdkp)
  1898. {
  1899. struct scsi_device *sdp = sdkp->device;
  1900. if (!sdkp->first_scan)
  1901. return;
  1902. sd_dif_config_host(sdkp);
  1903. if (!sdkp->protection_type)
  1904. return;
	if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) {
		sd_printk(KERN_NOTICE, sdkp,
			  "Disabling DIF Type %u protection\n",
			  sdkp->protection_type);
		sdkp->protection_type = 0;
		return;
	}
  1911. sd_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n",
  1912. sdkp->protection_type);
  1913. }
  1914. static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
  1915. struct scsi_sense_hdr *sshdr, int sense_valid,
  1916. int the_result)
  1917. {
  1918. if (sense_valid)
  1919. sd_print_sense_hdr(sdkp, sshdr);
  1920. else
  1921. sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
  1922. /*
  1923. * Set dirty bit for removable devices if not ready -
  1924. * sometimes drives will not report this properly.
  1925. */
  1926. if (sdp->removable &&
  1927. sense_valid && sshdr->sense_key == NOT_READY)
  1928. set_media_not_present(sdkp);
  1929. /*
  1930. * We used to set media_present to 0 here to indicate no media
  1931. * in the drive, but some drives fail read capacity even with
  1932. * media present, so we can't do that.
  1933. */
  1934. sdkp->capacity = 0; /* unknown mapped to zero - as usual */
  1935. }
  1936. #define RC16_LEN 32
  1937. #if RC16_LEN > SD_BUF_SIZE
  1938. #error RC16_LEN must not be more than SD_BUF_SIZE
  1939. #endif
  1940. #define READ_CAPACITY_RETRIES_ON_RESET 10
  1941. static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
  1942. unsigned char *buffer)
  1943. {
  1944. unsigned char cmd[16];
  1945. struct scsi_sense_hdr sshdr;
  1946. const struct scsi_exec_args exec_args = {
  1947. .sshdr = &sshdr,
  1948. };
  1949. int sense_valid = 0;
  1950. int the_result;
  1951. int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
  1952. unsigned int alignment;
  1953. unsigned long long lba;
  1954. unsigned sector_size;
  1955. if (sdp->no_read_capacity_16)
  1956. return -EINVAL;
  1957. do {
  1958. memset(cmd, 0, 16);
  1959. cmd[0] = SERVICE_ACTION_IN_16;
  1960. cmd[1] = SAI_READ_CAPACITY_16;
  1961. cmd[13] = RC16_LEN;
  1962. memset(buffer, 0, RC16_LEN);
  1963. the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN,
  1964. buffer, RC16_LEN, SD_TIMEOUT,
  1965. sdkp->max_retries, &exec_args);
  1966. if (media_not_present(sdkp, &sshdr))
  1967. return -ENODEV;
  1968. if (the_result > 0) {
  1969. sense_valid = scsi_sense_valid(&sshdr);
  1970. if (sense_valid &&
  1971. sshdr.sense_key == ILLEGAL_REQUEST &&
  1972. (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
  1973. sshdr.ascq == 0x00)
  1974. /* Invalid Command Operation Code or
  1975. * Invalid Field in CDB, just retry
  1976. * silently with RC10 */
  1977. return -EINVAL;
  1978. if (sense_valid &&
  1979. sshdr.sense_key == UNIT_ATTENTION &&
  1980. sshdr.asc == 0x29 && sshdr.ascq == 0x00)
  1981. /* Device reset might occur several times,
  1982. * give it one more chance */
  1983. if (--reset_retries > 0)
  1984. continue;
  1985. }
  1986. retries--;
  1987. } while (the_result && retries);
  1988. if (the_result) {
  1989. sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
  1990. read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
  1991. return -EINVAL;
  1992. }
  1993. sector_size = get_unaligned_be32(&buffer[8]);
  1994. lba = get_unaligned_be64(&buffer[0]);
  1995. if (sd_read_protection_type(sdkp, buffer) < 0) {
  1996. sdkp->capacity = 0;
  1997. return -ENODEV;
  1998. }
  1999. /* Logical blocks per physical block exponent */
  2000. sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
  2001. /* RC basis */
  2002. sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
  2003. /* Lowest aligned logical block */
  2004. alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
  2005. blk_queue_alignment_offset(sdp->request_queue, alignment);
  2006. if (alignment && sdkp->first_scan)
  2007. sd_printk(KERN_NOTICE, sdkp,
  2008. "physical block alignment offset: %u\n", alignment);
  2009. if (buffer[14] & 0x80) { /* LBPME */
  2010. sdkp->lbpme = 1;
  2011. if (buffer[14] & 0x40) /* LBPRZ */
  2012. sdkp->lbprz = 1;
  2013. sd_config_discard(sdkp, SD_LBP_WS16);
  2014. }
  2015. sdkp->capacity = lba + 1;
  2016. return sector_size;
  2017. }
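/*
 * Worked example (illustrative numbers): a READ CAPACITY(16) response
 * carrying a returned LBA of 0x74706daf in bytes 0-7, a logical block
 * length of 512 in bytes 8-11 and buffer[13] = 0x03 describes
 * 0x74706db0 (1953525168) 512-byte logical blocks with
 * (1 << 3) * 512 = 4096-byte physical blocks - a typical 1 TB
 * 512-emulated drive.
 */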
  2018. static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
  2019. unsigned char *buffer)
  2020. {
  2021. unsigned char cmd[16];
  2022. struct scsi_sense_hdr sshdr;
  2023. const struct scsi_exec_args exec_args = {
  2024. .sshdr = &sshdr,
  2025. };
  2026. int sense_valid = 0;
  2027. int the_result;
  2028. int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
  2029. sector_t lba;
  2030. unsigned sector_size;
  2031. do {
  2032. cmd[0] = READ_CAPACITY;
  2033. memset(&cmd[1], 0, 9);
  2034. memset(buffer, 0, 8);
  2035. the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
  2036. 8, SD_TIMEOUT, sdkp->max_retries,
  2037. &exec_args);
  2038. if (media_not_present(sdkp, &sshdr))
  2039. return -ENODEV;
  2040. if (the_result > 0) {
  2041. sense_valid = scsi_sense_valid(&sshdr);
  2042. if (sense_valid &&
  2043. sshdr.sense_key == UNIT_ATTENTION &&
  2044. sshdr.asc == 0x29 && sshdr.ascq == 0x00)
  2045. /* Device reset might occur several times,
  2046. * give it one more chance */
  2047. if (--reset_retries > 0)
  2048. continue;
  2049. }
  2050. retries--;
  2051. } while (the_result && retries);
  2052. if (the_result) {
  2053. sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
  2054. read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
  2055. return -EINVAL;
  2056. }
  2057. sector_size = get_unaligned_be32(&buffer[4]);
  2058. lba = get_unaligned_be32(&buffer[0]);
  2059. if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
		/*
		 * Some buggy (usb cardreader) devices return an lba of
		 * 0xffffffff when they want to report a size of 0 (with
		 * which they really mean no media is present).
		 */
  2063. sdkp->capacity = 0;
  2064. sdkp->physical_block_size = sector_size;
  2065. return sector_size;
  2066. }
  2067. sdkp->capacity = lba + 1;
  2068. sdkp->physical_block_size = sector_size;
  2069. return sector_size;
  2070. }
  2071. static int sd_try_rc16_first(struct scsi_device *sdp)
  2072. {
  2073. if (sdp->host->max_cmd_len < 16)
  2074. return 0;
  2075. if (sdp->try_rc_10_first)
  2076. return 0;
  2077. if (sdp->scsi_level > SCSI_SPC_2)
  2078. return 1;
  2079. if (scsi_device_protection(sdp))
  2080. return 1;
  2081. return 0;
  2082. }
  2083. /*
  2084. * read disk capacity
  2085. */
  2086. static void
  2087. sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
  2088. {
  2089. int sector_size;
  2090. struct scsi_device *sdp = sdkp->device;
  2091. if (sd_try_rc16_first(sdp)) {
  2092. sector_size = read_capacity_16(sdkp, sdp, buffer);
  2093. if (sector_size == -EOVERFLOW)
  2094. goto got_data;
  2095. if (sector_size == -ENODEV)
  2096. return;
  2097. if (sector_size < 0)
  2098. sector_size = read_capacity_10(sdkp, sdp, buffer);
  2099. if (sector_size < 0)
  2100. return;
  2101. } else {
  2102. sector_size = read_capacity_10(sdkp, sdp, buffer);
  2103. if (sector_size == -EOVERFLOW)
  2104. goto got_data;
  2105. if (sector_size < 0)
  2106. return;
  2107. if ((sizeof(sdkp->capacity) > 4) &&
  2108. (sdkp->capacity > 0xffffffffULL)) {
  2109. int old_sector_size = sector_size;
  2110. sd_printk(KERN_NOTICE, sdkp, "Very big device. "
  2111. "Trying to use READ CAPACITY(16).\n");
  2112. sector_size = read_capacity_16(sdkp, sdp, buffer);
  2113. if (sector_size < 0) {
  2114. sd_printk(KERN_NOTICE, sdkp,
  2115. "Using 0xffffffff as device size\n");
  2116. sdkp->capacity = 1 + (sector_t) 0xffffffff;
  2117. sector_size = old_sector_size;
  2118. goto got_data;
  2119. }
  2120. /* Remember that READ CAPACITY(16) succeeded */
  2121. sdp->try_rc_10_first = 0;
  2122. }
  2123. }
  2124. /* Some devices are known to return the total number of blocks,
  2125. * not the highest block number. Some devices have versions
  2126. * which do this and others which do not. Some devices we might
  2127. * suspect of doing this but we don't know for certain.
  2128. *
  2129. * If we know the reported capacity is wrong, decrement it. If
  2130. * we can only guess, then assume the number of blocks is even
  2131. * (usually true but not always) and err on the side of lowering
  2132. * the capacity.
  2133. */
  2134. if (sdp->fix_capacity ||
  2135. (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
  2136. sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
  2137. "from its reported value: %llu\n",
  2138. (unsigned long long) sdkp->capacity);
  2139. --sdkp->capacity;
  2140. }
  2141. got_data:
  2142. if (sector_size == 0) {
  2143. sector_size = 512;
  2144. sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
  2145. "assuming 512.\n");
  2146. }
  2147. if (sector_size != 512 &&
  2148. sector_size != 1024 &&
  2149. sector_size != 2048 &&
  2150. sector_size != 4096) {
  2151. sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
  2152. sector_size);
		/*
		 * The user might want to re-format the drive with
		 * a supported sector size. Once this happens, it
		 * would be relatively trivial to set the device up.
		 * For this reason, we leave the device in the table.
		 */
  2159. sdkp->capacity = 0;
  2160. /*
  2161. * set a bogus sector size so the normal read/write
  2162. * logic in the block layer will eventually refuse any
  2163. * request on this device without tripping over power
  2164. * of two sector size assumptions
  2165. */
  2166. sector_size = 512;
  2167. }
  2168. blk_queue_logical_block_size(sdp->request_queue, sector_size);
  2169. blk_queue_physical_block_size(sdp->request_queue,
  2170. sdkp->physical_block_size);
  2171. sdkp->device->sector_size = sector_size;
  2172. if (sdkp->capacity > 0xffffffff)
  2173. sdp->use_16_for_rw = 1;
  2174. }
  2175. /*
  2176. * Print disk capacity
  2177. */
  2178. static void
  2179. sd_print_capacity(struct scsi_disk *sdkp,
  2180. sector_t old_capacity)
  2181. {
  2182. int sector_size = sdkp->device->sector_size;
  2183. char cap_str_2[10], cap_str_10[10];
  2184. if (!sdkp->first_scan && old_capacity == sdkp->capacity)
  2185. return;
  2186. string_get_size(sdkp->capacity, sector_size,
  2187. STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
  2188. string_get_size(sdkp->capacity, sector_size,
  2189. STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
  2190. sd_printk(KERN_NOTICE, sdkp,
  2191. "%llu %d-byte logical blocks: (%s/%s)\n",
  2192. (unsigned long long)sdkp->capacity,
  2193. sector_size, cap_str_10, cap_str_2);
  2194. if (sdkp->physical_block_size != sector_size)
  2195. sd_printk(KERN_NOTICE, sdkp,
  2196. "%u-byte physical blocks\n",
  2197. sdkp->physical_block_size);
  2198. }
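/*
 * Example output (illustrative): for sdkp->capacity = 1953525168 with
 * 512-byte logical and 4096-byte physical blocks, this prints roughly:
 *
 *	sd 0:0:0:0: [sda] 1953525168 512-byte logical blocks: (1.00 TB/932 GiB)
 *	sd 0:0:0:0: [sda] 4096-byte physical blocks
 */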
  2199. /* called with buffer of length 512 */
  2200. static inline int
  2201. sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
  2202. unsigned char *buffer, int len, struct scsi_mode_data *data,
  2203. struct scsi_sense_hdr *sshdr)
  2204. {
  2205. /*
  2206. * If we must use MODE SENSE(10), make sure that the buffer length
  2207. * is at least 8 bytes so that the mode sense header fits.
  2208. */
  2209. if (sdkp->device->use_10_for_ms && len < 8)
  2210. len = 8;
  2211. return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
  2212. SD_TIMEOUT, sdkp->max_retries, data,
  2213. sshdr);
  2214. }
  2215. /*
  2216. * read write protect setting, if possible - called only in sd_revalidate_disk()
  2217. * called with buffer of length SD_BUF_SIZE
  2218. */
  2219. static void
  2220. sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
  2221. {
  2222. int res;
  2223. struct scsi_device *sdp = sdkp->device;
  2224. struct scsi_mode_data data;
  2225. int old_wp = sdkp->write_prot;
  2226. set_disk_ro(sdkp->disk, 0);
  2227. if (sdp->skip_ms_page_3f) {
  2228. sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
  2229. return;
  2230. }
  2231. if (sdp->use_192_bytes_for_3f) {
  2232. res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
  2233. } else {
  2234. /*
  2235. * First attempt: ask for all pages (0x3F), but only 4 bytes.
  2236. * We have to start carefully: some devices hang if we ask
  2237. * for more than is available.
  2238. */
  2239. res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
		/*
		 * Second attempt: ask for page 0. When only page 0 is
		 * implemented, a request for page 3F may return Sense Key
		 * 5: Illegal Request, Sense Code 24: Invalid field in
		 * CDB.
		 */
  2246. if (res < 0)
  2247. res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
		/*
		 * Third attempt: ask for 255 bytes, as we did earlier.
		 */
  2251. if (res < 0)
  2252. res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
  2253. &data, NULL);
  2254. }
  2255. if (res < 0) {
  2256. sd_first_printk(KERN_WARNING, sdkp,
  2257. "Test WP failed, assume Write Enabled\n");
  2258. } else {
  2259. sdkp->write_prot = ((data.device_specific & 0x80) != 0);
  2260. set_disk_ro(sdkp->disk, sdkp->write_prot);
  2261. if (sdkp->first_scan || old_wp != sdkp->write_prot) {
  2262. sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
  2263. sdkp->write_prot ? "on" : "off");
  2264. sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
  2265. }
  2266. }
  2267. }
  2268. /*
  2269. * sd_read_cache_type - called only from sd_revalidate_disk()
  2270. * called with buffer of length SD_BUF_SIZE
  2271. */
  2272. static void
  2273. sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
  2274. {
  2275. int len = 0, res;
  2276. struct scsi_device *sdp = sdkp->device;
  2277. int dbd;
  2278. int modepage;
  2279. int first_len;
  2280. struct scsi_mode_data data;
  2281. struct scsi_sense_hdr sshdr;
  2282. int old_wce = sdkp->WCE;
  2283. int old_rcd = sdkp->RCD;
  2284. int old_dpofua = sdkp->DPOFUA;
  2285. if (sdkp->cache_override)
  2286. return;
  2287. first_len = 4;
  2288. if (sdp->skip_ms_page_8) {
  2289. if (sdp->type == TYPE_RBC)
  2290. goto defaults;
  2291. else {
  2292. if (sdp->skip_ms_page_3f)
  2293. goto defaults;
  2294. modepage = 0x3F;
  2295. if (sdp->use_192_bytes_for_3f)
  2296. first_len = 192;
  2297. dbd = 0;
  2298. }
  2299. } else if (sdp->type == TYPE_RBC) {
  2300. modepage = 6;
  2301. dbd = 8;
  2302. } else {
  2303. modepage = 8;
  2304. dbd = 0;
  2305. }
  2306. /* cautiously ask */
  2307. res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
  2308. &data, &sshdr);
  2309. if (res < 0)
  2310. goto bad_sense;
  2311. if (!data.header_length) {
  2312. modepage = 6;
  2313. first_len = 0;
  2314. sd_first_printk(KERN_ERR, sdkp,
  2315. "Missing header in MODE_SENSE response\n");
  2316. }
  2317. /* that went OK, now ask for the proper length */
  2318. len = data.length;
  2319. /*
  2320. * We're only interested in the first three bytes, actually.
  2321. * But the data cache page is defined for the first 20.
  2322. */
  2323. if (len < 3)
  2324. goto bad_sense;
  2325. else if (len > SD_BUF_SIZE) {
  2326. sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
  2327. "data from %d to %d bytes\n", len, SD_BUF_SIZE);
  2328. len = SD_BUF_SIZE;
  2329. }
  2330. if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
  2331. len = 192;
  2332. /* Get the data */
  2333. if (len > first_len)
  2334. res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
  2335. &data, &sshdr);
  2336. if (!res) {
  2337. int offset = data.header_length + data.block_descriptor_length;
  2338. while (offset < len) {
  2339. u8 page_code = buffer[offset] & 0x3F;
  2340. u8 spf = buffer[offset] & 0x40;
  2341. if (page_code == 8 || page_code == 6) {
  2342. /* We're interested only in the first 3 bytes.
  2343. */
  2344. if (len - offset <= 2) {
  2345. sd_first_printk(KERN_ERR, sdkp,
  2346. "Incomplete mode parameter "
  2347. "data\n");
  2348. goto defaults;
  2349. } else {
  2350. modepage = page_code;
  2351. goto Page_found;
  2352. }
  2353. } else {
  2354. /* Go to the next page */
  2355. if (spf && len - offset > 3)
  2356. offset += 4 + (buffer[offset+2] << 8) +
  2357. buffer[offset+3];
  2358. else if (!spf && len - offset > 1)
  2359. offset += 2 + buffer[offset+1];
  2360. else {
  2361. sd_first_printk(KERN_ERR, sdkp,
  2362. "Incomplete mode "
  2363. "parameter data\n");
  2364. goto defaults;
  2365. }
  2366. }
  2367. }
  2368. sd_first_printk(KERN_WARNING, sdkp,
  2369. "No Caching mode page found\n");
  2370. goto defaults;
  2371. Page_found:
  2372. if (modepage == 8) {
  2373. sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
  2374. sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
  2375. } else {
  2376. sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
  2377. sdkp->RCD = 0;
  2378. }
  2379. sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
  2380. if (sdp->broken_fua) {
  2381. sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
  2382. sdkp->DPOFUA = 0;
  2383. } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
  2384. !sdkp->device->use_16_for_rw) {
  2385. sd_first_printk(KERN_NOTICE, sdkp,
  2386. "Uses READ/WRITE(6), disabling FUA\n");
  2387. sdkp->DPOFUA = 0;
  2388. }
  2389. /* No cache flush allowed for write protected devices */
  2390. if (sdkp->WCE && sdkp->write_prot)
  2391. sdkp->WCE = 0;
  2392. if (sdkp->first_scan || old_wce != sdkp->WCE ||
  2393. old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
  2394. sd_printk(KERN_NOTICE, sdkp,
  2395. "Write cache: %s, read cache: %s, %s\n",
  2396. sdkp->WCE ? "enabled" : "disabled",
  2397. sdkp->RCD ? "disabled" : "enabled",
  2398. sdkp->DPOFUA ? "supports DPO and FUA"
  2399. : "doesn't support DPO or FUA");
  2400. return;
  2401. }
  2402. bad_sense:
  2403. if (scsi_sense_valid(&sshdr) &&
  2404. sshdr.sense_key == ILLEGAL_REQUEST &&
  2405. sshdr.asc == 0x24 && sshdr.ascq == 0x0)
  2406. /* Invalid field in CDB */
  2407. sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
  2408. else
  2409. sd_first_printk(KERN_ERR, sdkp,
  2410. "Asking for cache data failed\n");
  2411. defaults:
  2412. if (sdp->wce_default_on) {
  2413. sd_first_printk(KERN_NOTICE, sdkp,
  2414. "Assuming drive cache: write back\n");
  2415. sdkp->WCE = 1;
  2416. } else {
  2417. sd_first_printk(KERN_WARNING, sdkp,
  2418. "Assuming drive cache: write through\n");
  2419. sdkp->WCE = 0;
  2420. }
  2421. sdkp->RCD = 0;
  2422. sdkp->DPOFUA = 0;
  2423. }
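/*
 * Worked example (illustrative): for a Caching mode page (page 8) whose
 * byte at buffer[offset + 2] is 0x04, WCE = 1 and RCD = 0, so the disk
 * logs "Write cache: enabled, read cache: enabled". If the
 * device-specific parameter also has 0x10 set (and FUA is usable),
 * DPOFUA = 1 and the disk logs "supports DPO and FUA".
 */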
  2424. /*
  2425. * The ATO bit indicates whether the DIF application tag is available
  2426. * for use by the operating system.
  2427. */
  2428. static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
  2429. {
  2430. int res, offset;
  2431. struct scsi_device *sdp = sdkp->device;
  2432. struct scsi_mode_data data;
  2433. struct scsi_sense_hdr sshdr;
  2434. if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
  2435. return;
  2436. if (sdkp->protection_type == 0)
  2437. return;
  2438. res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
  2439. sdkp->max_retries, &data, &sshdr);
  2440. if (res < 0 || !data.header_length ||
  2441. data.length < 6) {
  2442. sd_first_printk(KERN_WARNING, sdkp,
  2443. "getting Control mode page failed, assume no ATO\n");
  2444. if (scsi_sense_valid(&sshdr))
  2445. sd_print_sense_hdr(sdkp, &sshdr);
  2446. return;
  2447. }
  2448. offset = data.header_length + data.block_descriptor_length;
  2449. if ((buffer[offset] & 0x3f) != 0x0a) {
  2450. sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
  2451. return;
  2452. }
  2453. if ((buffer[offset + 5] & 0x80) == 0)
  2454. return;
  2455. sdkp->ATO = 1;
  2456. return;
  2457. }
  2458. /**
  2459. * sd_read_block_limits - Query disk device for preferred I/O sizes.
  2460. * @sdkp: disk to query
  2461. */
  2462. static void sd_read_block_limits(struct scsi_disk *sdkp)
  2463. {
  2464. struct scsi_vpd *vpd;
  2465. rcu_read_lock();
  2466. vpd = rcu_dereference(sdkp->device->vpd_pgb0);
  2467. if (!vpd || vpd->len < 16)
  2468. goto out;
  2469. sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]);
  2470. sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
  2471. sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
  2472. if (vpd->len >= 64) {
  2473. unsigned int lba_count, desc_count;
  2474. sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
  2475. if (!sdkp->lbpme)
  2476. goto out;
  2477. lba_count = get_unaligned_be32(&vpd->data[20]);
  2478. desc_count = get_unaligned_be32(&vpd->data[24]);
  2479. if (lba_count && desc_count)
  2480. sdkp->max_unmap_blocks = lba_count;
  2481. sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
  2482. if (vpd->data[32] & 0x80)
  2483. sdkp->unmap_alignment =
  2484. get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
  2485. if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
  2486. if (sdkp->max_unmap_blocks)
  2487. sd_config_discard(sdkp, SD_LBP_UNMAP);
  2488. else
  2489. sd_config_discard(sdkp, SD_LBP_WS16);
  2490. } else { /* LBP VPD page tells us what to use */
  2491. if (sdkp->lbpu && sdkp->max_unmap_blocks)
  2492. sd_config_discard(sdkp, SD_LBP_UNMAP);
  2493. else if (sdkp->lbpws)
  2494. sd_config_discard(sdkp, SD_LBP_WS16);
  2495. else if (sdkp->lbpws10)
  2496. sd_config_discard(sdkp, SD_LBP_WS10);
  2497. else
  2498. sd_config_discard(sdkp, SD_LBP_DISABLE);
  2499. }
  2500. }
  2501. out:
  2502. rcu_read_unlock();
  2503. }
  2504. /**
  2505. * sd_read_block_characteristics - Query block dev. characteristics
  2506. * @sdkp: disk to query
  2507. */
  2508. static void sd_read_block_characteristics(struct scsi_disk *sdkp)
  2509. {
  2510. struct request_queue *q = sdkp->disk->queue;
  2511. struct scsi_vpd *vpd;
  2512. u16 rot;
  2513. u8 zoned;
  2514. rcu_read_lock();
  2515. vpd = rcu_dereference(sdkp->device->vpd_pgb1);
  2516. if (!vpd || vpd->len < 8) {
  2517. rcu_read_unlock();
  2518. return;
  2519. }
  2520. rot = get_unaligned_be16(&vpd->data[4]);
  2521. zoned = (vpd->data[8] >> 4) & 3;
  2522. rcu_read_unlock();
  2523. if (rot == 1) {
  2524. blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
  2525. blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
  2526. }
  2527. if (sdkp->device->type == TYPE_ZBC) {
  2528. /*
  2529. * Host-managed: Per ZBC and ZAC specifications, writes in
  2530. * sequential write required zones of host-managed devices must
  2531. * be aligned to the device physical block size.
  2532. */
  2533. disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
  2534. blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
  2535. } else {
  2536. sdkp->zoned = zoned;
  2537. if (sdkp->zoned == 1) {
  2538. /* Host-aware */
  2539. disk_set_zoned(sdkp->disk, BLK_ZONED_HA);
  2540. } else {
  2541. /* Regular disk or drive managed disk */
  2542. disk_set_zoned(sdkp->disk, BLK_ZONED_NONE);
  2543. }
  2544. }
  2545. if (!sdkp->first_scan)
  2546. return;
  2547. if (blk_queue_is_zoned(q)) {
  2548. sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
  2549. q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
  2550. } else {
  2551. if (sdkp->zoned == 1)
  2552. sd_printk(KERN_NOTICE, sdkp,
  2553. "Host-aware SMR disk used as regular disk\n");
  2554. else if (sdkp->zoned == 2)
  2555. sd_printk(KERN_NOTICE, sdkp,
  2556. "Drive-managed SMR disk\n");
  2557. }
  2558. }
  2559. /**
  2560. * sd_read_block_provisioning - Query provisioning VPD page
  2561. * @sdkp: disk to query
  2562. */
  2563. static void sd_read_block_provisioning(struct scsi_disk *sdkp)
  2564. {
  2565. struct scsi_vpd *vpd;
  2566. if (sdkp->lbpme == 0)
  2567. return;
  2568. rcu_read_lock();
  2569. vpd = rcu_dereference(sdkp->device->vpd_pgb2);
  2570. if (!vpd || vpd->len < 8) {
  2571. rcu_read_unlock();
  2572. return;
  2573. }
  2574. sdkp->lbpvpd = 1;
  2575. sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */
  2576. sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */
  2577. sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */
  2578. rcu_read_unlock();
  2579. }
  2580. static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
  2581. {
  2582. struct scsi_device *sdev = sdkp->device;
  2583. if (sdev->host->no_write_same) {
  2584. sdev->no_write_same = 1;
  2585. return;
  2586. }
  2587. if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
  2588. struct scsi_vpd *vpd;
  2589. sdev->no_report_opcodes = 1;
  2590. /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
  2591. * CODES is unsupported and the device has an ATA
  2592. * Information VPD page (SAT).
  2593. */
  2594. rcu_read_lock();
  2595. vpd = rcu_dereference(sdev->vpd_pg89);
  2596. if (vpd)
  2597. sdev->no_write_same = 1;
  2598. rcu_read_unlock();
  2599. }
  2600. if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
  2601. sdkp->ws16 = 1;
  2602. if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
  2603. sdkp->ws10 = 1;
  2604. }
  2605. static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
  2606. {
  2607. struct scsi_device *sdev = sdkp->device;
  2608. if (!sdev->security_supported)
  2609. return;
  2610. if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
  2611. SECURITY_PROTOCOL_IN) == 1 &&
  2612. scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
  2613. SECURITY_PROTOCOL_OUT) == 1)
  2614. sdkp->security = 1;
  2615. }
  2616. static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
  2617. {
  2618. return logical_to_sectors(sdkp->device, get_unaligned_be64(buf));
  2619. }
  2620. /**
  2621. * sd_read_cpr - Query concurrent positioning ranges
  2622. * @sdkp: disk to query
  2623. */
  2624. static void sd_read_cpr(struct scsi_disk *sdkp)
  2625. {
  2626. struct blk_independent_access_ranges *iars = NULL;
  2627. unsigned char *buffer = NULL;
  2628. unsigned int nr_cpr = 0;
  2629. int i, vpd_len, buf_len = SD_BUF_SIZE;
  2630. u8 *desc;
  2631. /*
  2632. * We need to have the capacity set first for the block layer to be
  2633. * able to check the ranges.
  2634. */
  2635. if (sdkp->first_scan)
  2636. return;
  2637. if (!sdkp->capacity)
  2638. goto out;
  2639. /*
  2640. * Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
  2641. * leading to a maximum page size of 64 + 256*32 bytes.
  2642. */
  2643. buf_len = 64 + 256*32;
  2644. buffer = kmalloc(buf_len, GFP_KERNEL);
  2645. if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
  2646. goto out;
  2647. /* We must have at least a 64B header and one 32B range descriptor */
  2648. vpd_len = get_unaligned_be16(&buffer[2]) + 4;
  2649. if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
  2650. sd_printk(KERN_ERR, sdkp,
  2651. "Invalid Concurrent Positioning Ranges VPD page\n");
  2652. goto out;
  2653. }
  2654. nr_cpr = (vpd_len - 64) / 32;
  2655. if (nr_cpr == 1) {
  2656. nr_cpr = 0;
  2657. goto out;
  2658. }
  2659. iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
  2660. if (!iars) {
  2661. nr_cpr = 0;
  2662. goto out;
  2663. }
  2664. desc = &buffer[64];
  2665. for (i = 0; i < nr_cpr; i++, desc += 32) {
  2666. if (desc[0] != i) {
  2667. sd_printk(KERN_ERR, sdkp,
  2668. "Invalid Concurrent Positioning Range number\n");
  2669. nr_cpr = 0;
  2670. break;
  2671. }
  2672. iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
  2673. iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
  2674. }
  2675. out:
  2676. disk_set_independent_access_ranges(sdkp->disk, iars);
  2677. if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
  2678. sd_printk(KERN_NOTICE, sdkp,
  2679. "%u concurrent positioning ranges\n", nr_cpr);
  2680. sdkp->nr_actuators = nr_cpr;
  2681. }
  2682. kfree(buffer);
  2683. }
static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp)
{
	struct scsi_device *sdp = sdkp->device;
	unsigned int min_xfer_bytes =
		logical_to_bytes(sdp, sdkp->min_xfer_blocks);

	if (sdkp->min_xfer_blocks == 0)
		return false;

	if (min_xfer_bytes & (sdkp->physical_block_size - 1)) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Preferred minimum I/O size %u bytes not a " \
				"multiple of physical block size (%u bytes)\n",
				min_xfer_bytes, sdkp->physical_block_size);
		sdkp->min_xfer_blocks = 0;
		return false;
	}

	sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n",
			min_xfer_bytes);
	return true;
}
/*
 * Determine the device's preferred I/O size for reads and writes
 * unless the reported value is unreasonably small, large, not a
 * multiple of the physical block size, or simply garbage.
 */
static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
				      unsigned int dev_max)
{
	struct scsi_device *sdp = sdkp->device;
	unsigned int opt_xfer_bytes =
		logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
	unsigned int min_xfer_bytes =
		logical_to_bytes(sdp, sdkp->min_xfer_blocks);

	if (sdkp->opt_xfer_blocks == 0)
		return false;

	if (sdkp->opt_xfer_blocks > dev_max) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u logical blocks " \
				"> dev_max (%u logical blocks)\n",
				sdkp->opt_xfer_blocks, dev_max);
		return false;
	}

	if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u logical blocks " \
				"> sd driver limit (%u logical blocks)\n",
				sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
		return false;
	}

	if (opt_xfer_bytes < PAGE_SIZE) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u bytes < " \
				"PAGE_SIZE (%u bytes)\n",
				opt_xfer_bytes, (unsigned int)PAGE_SIZE);
		return false;
	}

	if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u bytes not a " \
				"multiple of preferred minimum block " \
				"size (%u bytes)\n",
				opt_xfer_bytes, min_xfer_bytes);
		return false;
	}

	if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u bytes not a " \
				"multiple of physical block size (%u bytes)\n",
				opt_xfer_bytes, sdkp->physical_block_size);
		return false;
	}

	sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
			opt_xfer_bytes);
	return true;
}
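/*
 * Worked example for the checks above, assuming a hypothetical drive
 * with 512-byte logical / 4096-byte physical blocks and a PAGE_SIZE of
 * 4096: an OPTIMAL TRANSFER LENGTH of 2048 blocks (1 MiB) passes every
 * check; 1 block (512 bytes) fails the PAGE_SIZE check; 9 blocks
 * (4608 bytes) fails the physical block alignment check. In the two
 * failing cases the caller leaves io_opt at 0.
 */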
/**
 * sd_revalidate_disk - called the first time a new disk is seen;
 * performs disk spin up, read_capacity, etc.
 * @disk: struct gendisk we care about
 **/
static int sd_revalidate_disk(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	struct request_queue *q = sdkp->disk->queue;
	sector_t old_capacity = sdkp->capacity;
	unsigned char *buffer;
	unsigned int dev_max, rw_max;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
				      "sd_revalidate_disk\n"));

	/*
	 * If the device is offline, don't try to read the capacity or any
	 * of the other niceties.
	 */
	if (!scsi_device_online(sdp))
		goto out;

	buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
	if (!buffer) {
		sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
			  "allocation failure.\n");
		goto out;
	}

	sd_spinup_disk(sdkp);

	/*
	 * Without media there is no reason to ask; moreover, some devices
	 * react badly if we do.
	 */
	if (sdkp->media_present) {
		sd_read_capacity(sdkp, buffer);

		/*
		 * Set the default to rotational. All non-rotational devices
		 * support the block characteristics VPD page, which will
		 * cause this to be updated correctly, and any device which
		 * doesn't support it should be treated as rotational.
		 */
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);

		if (scsi_device_supports_vpd(sdp)) {
			sd_read_block_provisioning(sdkp);
			sd_read_block_limits(sdkp);
			sd_read_block_characteristics(sdkp);
			sd_zbc_read_zones(sdkp, buffer);
			sd_read_cpr(sdkp);
		}

		sd_print_capacity(sdkp, old_capacity);

		sd_read_write_protect_flag(sdkp, buffer);
		sd_read_cache_type(sdkp, buffer);
		sd_read_app_tag_own(sdkp, buffer);
		sd_read_write_same(sdkp, buffer);
		sd_read_security(sdkp, buffer);
		sd_config_protection(sdkp);
	}

	/*
	 * We now have all cache related info, determine how we deal
	 * with flush requests.
	 */
	sd_set_flush_flag(sdkp);

	/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
	dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;

	/* Some devices report a maximum block count for READ/WRITE requests. */
	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);

	if (sd_validate_min_xfer_size(sdkp))
		blk_queue_io_min(sdkp->disk->queue,
				 logical_to_bytes(sdp, sdkp->min_xfer_blocks));
	else
		blk_queue_io_min(sdkp->disk->queue, 0);

	if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
	} else {
		q->limits.io_opt = 0;
		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
				      (sector_t)BLK_DEF_MAX_SECTORS);
	}

	/*
	 * Limit the default to the SCSI host optimal sector limit if set.
	 * There may be an impact on performance when the size of a request
	 * exceeds this host limit.
	 */
	rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);

	/* Do not exceed controller limit */
	rw_max = min(rw_max, queue_max_hw_sectors(q));

	/*
	 * Only update max_sectors if previously unset or if the current value
	 * exceeds the capabilities of the hardware.
	 */
	if (sdkp->first_scan ||
	    q->limits.max_sectors > q->limits.max_dev_sectors ||
	    q->limits.max_sectors > q->limits.max_hw_sectors)
		q->limits.max_sectors = rw_max;

	sdkp->first_scan = 0;

	set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
	sd_config_write_same(sdkp);
	kfree(buffer);

	/*
	 * For a zoned drive, revalidating the zones can be done only once
	 * the gendisk capacity is set. So if this fails, set back the gendisk
	 * capacity to 0.
	 */
	if (sd_zbc_revalidate_zones(sdkp))
		set_capacity_and_notify(disk, 0);

 out:
	return 0;
}
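/*
 * Example of the limit stacking above, with illustrative numbers: a
 * disk reporting a valid 1 MiB optimal transfer size (rw_max = 2048
 * sectors) behind a controller that caps requests at 512 KiB would
 * have rw_max cut to 1024 sectors by queue_max_hw_sectors(), and that
 * value becomes the queue's default max_sectors on the first scan.
 */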
/**
 * sd_unlock_native_capacity - unlock native capacity
 * @disk: struct gendisk to set capacity for
 *
 * The block layer calls this function if it detects that partitions
 * on @disk reach beyond the end of the device. If the SCSI host
 * implements the ->unlock_native_capacity() method, it is invoked to
 * give it a chance to adjust the device capacity.
 *
 * CONTEXT:
 * Defined by block layer. Might sleep.
 */
static void sd_unlock_native_capacity(struct gendisk *disk)
{
	struct scsi_device *sdev = scsi_disk(disk)->device;

	if (sdev->host->hostt->unlock_native_capacity)
		sdev->host->hostt->unlock_native_capacity(sdev);
}
/**
 * sd_format_disk_name - format disk name
 * @prefix: name prefix - ie. "sd" for SCSI disks
 * @index: index of the disk to format name for
 * @buf: output buffer
 * @buflen: length of the output buffer
 *
 * SCSI disk names start at sda. The 26th device is sdz and the
 * 27th is sdaa. The last two-lettered name is sdzz, which is
 * followed by sdaaa.
 *
 * This is basically base-26 counting with one extra 'nil' entry
 * at the beginning of every digit from the second on, and can be
 * computed like a base-26 conversion with the index shifted down
 * by one after each digit is produced.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}
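/*
 * Worked example of the digit loop above (base = 26, digits are
 * produced last-first):
 *
 *	index   0:  0 % 26 -> 'a', index becomes -1	=> "sda"
 *	index  25: 25 % 26 -> 'z', index becomes -1	=> "sdz"
 *	index  26: 'a', then 0 % 26 -> 'a'		=> "sdaa"
 *	index 701: 'z', then 25 % 26 -> 'z'		=> "sdzz"
 *	index 702: 'a', 'a', then 'a'			=> "sdaaa"
 *
 * The "- 1" after each division is what turns plain base-26 counting
 * into this scheme; without it, "a" and "aa" would denote the same
 * index.
 */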
/**
 * sd_probe - called during driver initialization and whenever a
 * new scsi device is attached to the system. It is called once
 * for each scsi device (not just disks) present.
 * @dev: pointer to device object
 *
 * Returns 0 if successful (or when not interested in this scsi
 * device, e.g. a scanner); a negative errno when there is an error.
 *
 * Note: this function is invoked from the scsi mid-level.
 * This function sets up the mapping between a given
 * <host,channel,id,lun> (found in sdp) and a new device name
 * (e.g. /dev/sda). More precisely it is the block device major
 * and minor number that is chosen here.
 *
 * Assume sd_probe is not re-entrant (for the time being).
 * Also consider sd_probe() and sd_remove() running concurrently.
 **/
static int sd_probe(struct device *dev)
{
	struct scsi_device *sdp = to_scsi_device(dev);
	struct scsi_disk *sdkp;
	struct gendisk *gd;
	int index;
	int error;

	scsi_autopm_get_device(sdp);
	error = -ENODEV;
	if (sdp->type != TYPE_DISK &&
	    sdp->type != TYPE_ZBC &&
	    sdp->type != TYPE_MOD &&
	    sdp->type != TYPE_RBC)
		goto out;

	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
		sdev_printk(KERN_WARNING, sdp,
			    "Unsupported ZBC host-managed device.\n");
		goto out;
	}

	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
					"sd_probe\n"));

	error = -ENOMEM;
	sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
	if (!sdkp)
		goto out;

	gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
					 &sd_bio_compl_lkclass);
	if (!gd)
		goto out_free;

	index = ida_alloc(&sd_index_ida, GFP_KERNEL);
	if (index < 0) {
		sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
		goto out_put;
	}

	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
	if (error) {
		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
		goto out_free_index;
	}

	sdkp->device = sdp;
	sdkp->disk = gd;
	sdkp->index = index;
	sdkp->max_retries = SD_MAX_RETRIES;
	atomic_set(&sdkp->openers, 0);
	atomic_set(&sdkp->device->ioerr_cnt, 0);

	if (!sdp->request_queue->rq_timeout) {
		if (sdp->type != TYPE_MOD)
			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
		else
			blk_queue_rq_timeout(sdp->request_queue,
					     SD_MOD_TIMEOUT);
	}

	device_initialize(&sdkp->disk_dev);
	sdkp->disk_dev.parent = get_device(dev);
	sdkp->disk_dev.class = &sd_disk_class;
	dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));

	error = device_add(&sdkp->disk_dev);
	if (error) {
		put_device(&sdkp->disk_dev);
		goto out;
	}

	dev_set_drvdata(dev, sdkp);

	gd->major = sd_major((index & 0xf0) >> 4);
	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
	gd->minors = SD_MINORS;

	gd->fops = &sd_fops;
	gd->private_data = sdkp;

	/* defaults, until the device tells us otherwise */
	sdp->sector_size = 512;
	sdkp->capacity = 0;
	sdkp->media_present = 1;
	sdkp->write_prot = 0;
	sdkp->cache_override = 0;
	sdkp->WCE = 0;
	sdkp->RCD = 0;
	sdkp->ATO = 0;
	sdkp->first_scan = 1;
	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;

	sd_revalidate_disk(gd);

	if (sdp->removable) {
		gd->flags |= GENHD_FL_REMOVABLE;
		gd->events |= DISK_EVENT_MEDIA_CHANGE;
		gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
	}

	blk_pm_runtime_init(sdp->request_queue, dev);
	if (sdp->rpm_autosuspend) {
		pm_runtime_set_autosuspend_delay(dev,
			sdp->host->hostt->rpm_autosuspend_delay);
	}

	error = device_add_disk(dev, gd, NULL);
	if (error) {
		put_device(&sdkp->disk_dev);
		put_disk(gd);
		goto out;
	}

	if (sdkp->security) {
		sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
		if (sdkp->opal_dev)
			sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
	}

	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
		  sdp->removable ? "removable " : "");
	scsi_autopm_put_device(sdp);

	return 0;

 out_free_index:
	ida_free(&sd_index_ida, index);
 out_put:
	put_disk(gd);
 out_free:
	kfree(sdkp);
 out:
	scsi_autopm_put_device(sdp);
	return error;
}
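/*
 * Illustration of the major/minor mapping computed in sd_probe(),
 * assuming the conventional SCSI disk majors where sd_major(0) is 8
 * and sd_major(1) is 65, with SD_MINORS == 16:
 *
 *	index  0 -> major 8,  first_minor   0	("sda")
 *	index  1 -> major 8,  first_minor  16	("sdb")
 *	index 15 -> major 8,  first_minor 240	("sdp")
 *	index 16 -> major 65, first_minor   0	("sdq")
 *
 * Bits 4-7 of the index select the major group; bits 0-3 (shifted up
 * by 4) together with bits 8 and above form the first minor, leaving
 * 16 minors per disk for partitions.
 */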
/**
 * sd_remove - called whenever a scsi disk (previously recognized by
 * sd_probe) is detached from the system. It is called (potentially
 * multiple times) during sd module unload.
 * @dev: pointer to device object
 *
 * Note: this function is invoked from the scsi mid-level.
 * This function potentially frees up a device name (e.g. /dev/sdc)
 * that could be re-used by a subsequent sd_probe().
 * This function is not called when the built-in sd driver is "exit-ed".
 **/
static int sd_remove(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	scsi_autopm_get_device(sdkp->device);

	device_del(&sdkp->disk_dev);
	del_gendisk(sdkp->disk);
	sd_shutdown(dev);

	put_disk(sdkp->disk);
	return 0;
}
static void scsi_disk_release(struct device *dev)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	ida_free(&sd_index_ida, sdkp->index);
	sd_zbc_free_zone_info(sdkp);
	put_device(&sdkp->device->sdev_gendev);
	free_opal_dev(sdkp->opal_dev);

	kfree(sdkp);
}
static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
	unsigned char cmd[6] = { START_STOP };	/* START_VALID */
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
		.req_flags = BLK_MQ_REQ_PM,
	};
	struct scsi_device *sdp = sdkp->device;
	int res;

	if (start)
		cmd[4] |= 1;	/* START */

	if (sdp->start_stop_pwr_cond)
		cmd[4] |= start ? 1 << 4 : 3 << 4;	/* Active or Standby */

	if (!scsi_device_online(sdp))
		return -ENODEV;

	res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT,
			       sdkp->max_retries, &exec_args);
	if (res) {
		sd_print_result(sdkp, "Start/Stop Unit failed", res);
		if (res > 0 && scsi_sense_valid(&sshdr)) {
			sd_print_sense_hdr(sdkp, &sshdr);
			/* 0x3a is medium not present */
			if (sshdr.asc == 0x3a)
				res = 0;
		}
	}

	/* SCSI error codes must not go to the generic layer */
	if (res)
		return -EIO;

	return 0;
}
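/*
 * Resulting CDB byte 4 values for the cases above (derived from the
 * code; the POWER CONDITION field names come from SBC):
 *
 *	start, no power condition support:	0x01  (START bit)
 *	stop,  no power condition support:	0x00
 *	start, start_stop_pwr_cond set:		0x11  (ACTIVE + START)
 *	stop,  start_stop_pwr_cond set:		0x30  (STANDBY)
 */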
/*
 * Send a SYNCHRONIZE CACHE instruction down to the device through
 * the normal SCSI command structure. Wait for the command to
 * complete.
 */
static void sd_shutdown(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	if (!sdkp)
		return;		/* this can happen */

	if (pm_runtime_suspended(dev))
		return;

	if (sdkp->WCE && sdkp->media_present) {
		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
		sd_sync_cache(sdkp, NULL);
	}

	if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
		sd_start_stop_device(sdkp, 0);
	}
}
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	struct scsi_sense_hdr sshdr;
	int ret = 0;

	if (!sdkp)	/* E.g.: runtime suspend following sd_remove() */
		return 0;

	if (sdkp->WCE && sdkp->media_present) {
		if (!sdkp->device->silence_suspend)
			sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
		ret = sd_sync_cache(sdkp, &sshdr);

		if (ret) {
			/* ignore OFFLINE device */
			if (ret == -ENODEV)
				return 0;

			if (!scsi_sense_valid(&sshdr) ||
			    sshdr.sense_key != ILLEGAL_REQUEST)
				return ret;

			/*
			 * sshdr.sense_key == ILLEGAL_REQUEST means this drive
			 * doesn't support sync. There's not much to do and
			 * suspend shouldn't fail.
			 */
			ret = 0;
		}
	}

	if (sdkp->device->manage_start_stop) {
		if (!sdkp->device->silence_suspend)
			sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
		/* an error is not worth aborting a system sleep */
		ret = sd_start_stop_device(sdkp, 0);
		if (ignore_stop_errors)
			ret = 0;
	}

	return ret;
}
static int sd_suspend_system(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return sd_suspend_common(dev, true);
}

static int sd_suspend_runtime(struct device *dev)
{
	return sd_suspend_common(dev, false);
}
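/*
 * Note the asymmetry above, which appears intentional: system suspend
 * passes ignore_stop_errors == true because a failed stop is not worth
 * aborting a system-wide sleep transition, while runtime suspend
 * passes false so that the error is propagated and the device is left
 * runtime-active rather than wrongly marked suspended.
 */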
static int sd_resume(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	int ret;

	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
		return 0;

	if (!sdkp->device->manage_start_stop)
		return 0;

	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
	ret = sd_start_stop_device(sdkp, 1);
	if (!ret)
		opal_unlock_from_suspend(sdkp->opal_dev);
	return ret;
}

static int sd_resume_system(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return sd_resume(dev);
}
static int sd_resume_runtime(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	struct scsi_device *sdp;

	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
		return 0;

	sdp = sdkp->device;

	if (sdp->ignore_media_change) {
		/* clear the device's sense data */
		static const u8 cmd[10] = { REQUEST_SENSE };
		const struct scsi_exec_args exec_args = {
			.req_flags = BLK_MQ_REQ_PM,
		};

		if (scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
				     sdp->request_queue->rq_timeout, 1,
				     &exec_args))
			sd_printk(KERN_NOTICE, sdkp,
				  "Failed to clear sense data\n");
	}

	return sd_resume(dev);
}
/**
 * init_sd - entry point for this driver (both when built in and when
 * loaded as a module).
 *
 * Note: this function registers this driver with the scsi mid-level.
 **/
static int __init init_sd(void)
{
	int majors = 0, i, err;

	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));

	for (i = 0; i < SD_MAJORS; i++) {
		if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
			continue;
		majors++;
	}

	if (!majors)
		return -ENODEV;

	err = class_register(&sd_disk_class);
	if (err)
		goto err_out;

	sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
					 0, 0, NULL);
	if (!sd_cdb_cache) {
		printk(KERN_ERR "sd: can't init extended cdb cache\n");
		err = -ENOMEM;
		goto err_out_class;
	}

	sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
	if (!sd_page_pool) {
		printk(KERN_ERR "sd: can't init discard page pool\n");
		err = -ENOMEM;
		goto err_out_cache;
	}

	err = scsi_register_driver(&sd_template.gendrv);
	if (err)
		goto err_out_driver;

	return 0;

err_out_driver:
	mempool_destroy(sd_page_pool);
err_out_cache:
	kmem_cache_destroy(sd_cdb_cache);
err_out_class:
	class_unregister(&sd_disk_class);
err_out:
	for (i = 0; i < SD_MAJORS; i++)
		unregister_blkdev(sd_major(i), "sd");
	return err;
}
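/*
 * The error labels above unwind in reverse order of setup: a driver
 * registration failure releases the page pool, then the CDB cache,
 * then the device class, and finally the reserved block majors, so
 * each label undoes exactly what had succeeded before the failing
 * step.
 */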
/**
 * exit_sd - exit point for this driver (when it is a module).
 *
 * Note: this function unregisters this driver from the scsi mid-level.
 **/
static void __exit exit_sd(void)
{
	int i;

	SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));

	scsi_unregister_driver(&sd_template.gendrv);
	mempool_destroy(sd_page_pool);
	kmem_cache_destroy(sd_cdb_cache);
	class_unregister(&sd_disk_class);

	for (i = 0; i < SD_MAJORS; i++)
		unregister_blkdev(sd_major(i), "sd");
}

module_init(init_sd);
module_exit(exit_sd);
void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
{
	scsi_print_sense_hdr(sdkp->device,
			     sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
}

void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
{
	const char *hb_string = scsi_hostbyte_string(result);

	/* hb_string is known non-NULL in the first branch, so the
	 * redundant "hb_string ? hb_string : ..." fallback is dropped. */
	if (hb_string)
		sd_printk(KERN_INFO, sdkp,
			  "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
			  hb_string, "DRIVER_OK");
	else
		sd_printk(KERN_INFO, sdkp,
			  "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
			  msg, host_byte(result), "DRIVER_OK");
}