nfit.c 91 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  6. #include <linux/platform_device.h>
  7. #include <linux/dma-mapping.h>
  8. #include <linux/workqueue.h>
  9. #include <linux/libnvdimm.h>
  10. #include <linux/genalloc.h>
  11. #include <linux/vmalloc.h>
  12. #include <linux/device.h>
  13. #include <linux/module.h>
  14. #include <linux/mutex.h>
  15. #include <linux/ndctl.h>
  16. #include <linux/sizes.h>
  17. #include <linux/list.h>
  18. #include <linux/slab.h>
  19. #include <nd-core.h>
  20. #include <intel.h>
  21. #include <nfit.h>
  22. #include <nd.h>
  23. #include "nfit_test.h"
  24. #include "../watermark.h"
  25. /*
  26. * Generate an NFIT table to describe the following topology:
  27. *
  28. * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
  29. *
  30. * (a) (b) DIMM BLK-REGION
  31. * +----------+--------------+----------+---------+
  32. * +------+ | blk2.0 | pm0.0 | blk2.1 | pm1.0 | 0 region2
  33. * | imc0 +--+- - - - - region0 - - - -+----------+ +
  34. * +--+---+ | blk3.0 | pm0.0 | blk3.1 | pm1.0 | 1 region3
  35. * | +----------+--------------v----------v v
  36. * +--+---+ | |
  37. * | cpu0 | region1
  38. * +--+---+ | |
  39. * | +-------------------------^----------^ ^
  40. * +--+---+ | blk4.0 | pm1.0 | 2 region4
  41. * | imc1 +--+-------------------------+----------+ +
  42. * +------+ | blk5.0 | pm1.0 | 3 region5
  43. * +-------------------------+----------+-+-------+
  44. *
  45. * +--+---+
  46. * | cpu1 |
  47. * +--+---+ (Hotplug DIMM)
  48. * | +----------------------------------------------+
  49. * +--+---+ | blk6.0/pm7.0 | 4 region6/7
  50. * | imc0 +--+----------------------------------------------+
  51. * +------+
  52. *
  53. *
  54. * *) In this layout we have four dimms and two memory controllers in one
  55. * socket. Each unique interface (BLK or PMEM) to DPA space
  56. * is identified by a region device with a dynamically assigned id.
  57. *
  58. * *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
  59. * A single PMEM namespace "pm0.0" is created using half of the
  60. * REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespaces
  61. * allocate from the bottom of a region. The unallocated
  62. * portion of REGION0 aliases with REGION2 and REGION3. That
  63. * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
  64. * "blk3.0") starting at the base of each DIMM to offset (a) in those
  65. * DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable
  66. * names that can be assigned to a namespace.
  67. *
  68. * *) In the last portion of dimm0 and dimm1 we have an interleaved
  69. * SPA range, REGION1, that spans those two dimms as well as dimm2
  70. * and dimm3. Some of REGION1 is allocated to a PMEM namespace named
  71. * "pm1.0" the rest is reclaimed in 4 BLK namespaces (for each
  72. * dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
  73. * "blk5.0".
  74. *
  75. * *) The portion of dimm2 and dimm3 that do not participate in the
  76. * REGION1 interleaved SPA range (i.e. the DPA address below offset
  77. * (b) are also included in the "blk4.0" and "blk5.0" namespaces.
  78. * Note, that BLK namespaces need not be contiguous in DPA-space, and
  79. * can consume aliased capacity from multiple interleave sets.
  80. *
  81. * BUS1: Legacy NVDIMM (single contiguous range)
  82. *
  83. * region2
  84. * +---------------------+
  85. * |---------------------|
  86. * || pm2.0 ||
  87. * |---------------------|
  88. * +---------------------+
  89. *
  90. * *) A NFIT-table may describe a simple system-physical-address range
  91. * with no BLK aliasing. This type of region may optionally
  92. * reference an NVDIMM.
  93. */
/*
 * Geometry of the emulated topology described in the comment above:
 * counts of each NFIT structure type and the sizes of the simulated
 * resources backing them.
 */
enum {
	NUM_PM = 3,		/* number of PMEM-only SPA ranges */
	NUM_DCR = 5,		/* number of DIMM control regions */
	NUM_HINTS = 8,		/* flush hint addresses per flush structure */
	NUM_BDW = NUM_DCR,	/* one block data window per control region */
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */
		+ 4 /* spa1 iset */ + 1 /* spa11 iset */,
	DIMM_SIZE = SZ_32M,	/* capacity of each simulated DIMM */
	LABEL_SIZE = SZ_128K,	/* label storage area per DIMM */
	SPA_VCD_SIZE = SZ_4M,	/* virtual CD region size */
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	BDW_SIZE = 64 << 8,	/* block data window aperture size */
	DCR_SIZE = 12,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};
/*
 * Layout of an emulated DIMM control region: block-window command
 * registers followed by the data aperture.
 * NOTE(review): "aperature" is a misspelling of "aperture", but the
 * field name is referenced throughout this file so it is kept as-is.
 */
struct nfit_test_dcr {
	__le64 bdw_addr;	/* block data window target DPA */
	__le32 bdw_status;	/* block data window command status */
	__u8 aperature[BDW_SIZE];	/* [sic] data aperture backing store */
};
  117. #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
  118. (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
  119. | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
/* NFIT device handles for the emulated DIMMs; index order matches the
 * DIMM numbering in the topology diagram above (handles [5] and [6]
 * live on the second bus / hotplug side). */
static u32 handle[] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
	[5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
	[6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
};

/* Per-DIMM fault injection, indexed like handle[]: flags select which
 * commands fail, code is the forced error - TODO confirm against the
 * sysfs attributes that set them (outside this view). */
static unsigned long dimm_fail_cmd_flags[ARRAY_SIZE(handle)];
static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
/*
 * Emulated Intel security-DSM state, one entry per control region.
 */
struct nfit_test_sec {
	u8 state;		/* current security state flags */
	u8 ext_state;		/* extended (master passphrase) state */
	u8 old_state;		/* saved prior state - TODO confirm usage */
	u8 passphrase[32];	/* user passphrase backing store */
	u8 master_passphrase[32];
	u64 overwrite_end_time;	/* when the emulated overwrite finishes - presumably jiffies; verify against the overwrite handler */
} dimm_sec_info[NUM_DCR];
/*
 * Default SMART health payload reported for every DIMM: healthy,
 * plausible temperatures/wear, with both alarm trips asserted so the
 * alarm plumbing can be exercised.  Temperatures are in 1/16 degC
 * units (hence the "* 16").
 */
static const struct nd_intel_smart smart_def = {
	.flags = ND_INTEL_SMART_HEALTH_VALID
		| ND_INTEL_SMART_SPARES_VALID
		| ND_INTEL_SMART_ALARM_VALID
		| ND_INTEL_SMART_USED_VALID
		| ND_INTEL_SMART_SHUTDOWN_VALID
		| ND_INTEL_SMART_SHUTDOWN_COUNT_VALID
		| ND_INTEL_SMART_MTEMP_VALID
		| ND_INTEL_SMART_CTEMP_VALID,
	.health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
	.media_temperature = 23 * 16,
	.ctrl_temperature = 25 * 16,
	.pmic_temperature = 40 * 16,
	.spares = 75,
	.alarm_flags = ND_INTEL_SMART_SPARE_TRIP
		| ND_INTEL_SMART_TEMP_TRIP,
	.ait_status = 1,
	.life_used = 5,
	.shutdown_state = 0,	/* clean last shutdown */
	.shutdown_count = 42,
	.vendor_size = 0,	/* no vendor-specific payload */
};
/* Emulated firmware-update state machine, one instance per DIMM. */
struct nfit_test_fw {
	enum intel_fw_update_state state;
	u32 context;		/* token issued by start, echoed by send/finish */
	u64 version;		/* currently "installed" firmware revision */
	u32 size_received;	/* payload bytes accepted in this update */
	u64 end_time;		/* jiffies when the emulated verify completes */
	bool armed;		/* activation armed - TODO confirm setter */
	bool missed_activate;	/* cleared when verify completes */
	unsigned long last_activate;
};
/*
 * Per-bus test fixture: one instance per emulated NVDIMM bus.  Owns the
 * generated NFIT buffer plus the backing allocations (DIMM memory,
 * label areas, flush hint pages, control regions) that it describes.
 */
struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;	/* simulated resource list - TODO confirm element type */
	void *nfit_buf;			/* the generated NFIT table */
	dma_addr_t nfit_dma;
	size_t nfit_size;		/* allocated size of nfit_buf */
	size_t nfit_filled;		/* bytes of nfit_buf populated */
	int dcr_idx;			/* starting DCR index for this bus - TODO confirm */
	int num_dcr;			/* control regions on this bus */
	int num_pm;			/* PMEM SPA ranges on this bus */
	void **dimm;			/* per-DIMM backing memory */
	dma_addr_t *dimm_dma;
	void **flush;			/* per-DIMM flush hint pages */
	dma_addr_t *flush_dma;
	void **label;			/* per-DIMM label storage */
	dma_addr_t *label_dma;
	void **spa_set;			/* backing for interleaved SPA ranges */
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;	/* per-DIMM control/aperture windows */
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);	/* bus-specific buffer allocation */
	void (*setup)(struct nfit_test *t);	/* bus-specific NFIT construction */
	int setup_hotplug;		/* non-zero once hotplug topology is added */
	union acpi_object **_fit;	/* replacement FIT payload - TODO confirm consumer */
	dma_addr_t _fit_dma;
	struct ars_state {
		struct nd_cmd_ars_status *ars_status;	/* canned scrub results */
		unsigned long deadline;	/* jiffies when emulated scrub completes */
		spinlock_t lock;	/* protects ars_status contents and deadline */
	} ars_state;
	struct device *dimm_dev[ARRAY_SIZE(handle)];
	struct nd_intel_smart *smart;	/* per-DIMM SMART payloads */
	struct nd_intel_smart_threshold *smart_threshold;
	struct badrange badrange;	/* injected media error ranges */
	struct work_struct work;
	struct nfit_test_fw *fw;	/* per-DIMM firmware-update state */
};
/* NOTE(review): queue for deferred test work - confirm which handlers
 * schedule onto it (scheduling sites are outside this view). */
static struct workqueue_struct *nfit_wq;

/* presumably tracks allocation of simulated SPA ranges - verify at the
 * gen_pool_create() call site */
static struct gen_pool *nfit_pool;

/* all-zeros buffer used for passphrase comparisons - TODO confirm */
static const char zero_key[NVDIMM_PASSPHRASE_LEN];
  212. static struct nfit_test *to_nfit_test(struct device *dev)
  213. {
  214. struct platform_device *pdev = to_platform_device(dev);
  215. return container_of(pdev, struct nfit_test, pdev);
  216. }
  217. static int nd_intel_test_get_fw_info(struct nfit_test *t,
  218. struct nd_intel_fw_info *nd_cmd, unsigned int buf_len,
  219. int idx)
  220. {
  221. struct device *dev = &t->pdev.dev;
  222. struct nfit_test_fw *fw = &t->fw[idx];
  223. dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p, buf_len: %u, idx: %d\n",
  224. __func__, t, nd_cmd, buf_len, idx);
  225. if (buf_len < sizeof(*nd_cmd))
  226. return -EINVAL;
  227. nd_cmd->status = 0;
  228. nd_cmd->storage_size = INTEL_FW_STORAGE_SIZE;
  229. nd_cmd->max_send_len = INTEL_FW_MAX_SEND_LEN;
  230. nd_cmd->query_interval = INTEL_FW_QUERY_INTERVAL;
  231. nd_cmd->max_query_time = INTEL_FW_QUERY_MAX_TIME;
  232. nd_cmd->update_cap = 0;
  233. nd_cmd->fis_version = INTEL_FW_FIS_VERSION;
  234. nd_cmd->run_version = 0;
  235. nd_cmd->updated_version = fw->version;
  236. return 0;
  237. }
  238. static int nd_intel_test_start_update(struct nfit_test *t,
  239. struct nd_intel_fw_start *nd_cmd, unsigned int buf_len,
  240. int idx)
  241. {
  242. struct device *dev = &t->pdev.dev;
  243. struct nfit_test_fw *fw = &t->fw[idx];
  244. dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
  245. __func__, t, nd_cmd, buf_len, idx);
  246. if (buf_len < sizeof(*nd_cmd))
  247. return -EINVAL;
  248. if (fw->state != FW_STATE_NEW) {
  249. /* extended status, FW update in progress */
  250. nd_cmd->status = 0x10007;
  251. return 0;
  252. }
  253. fw->state = FW_STATE_IN_PROGRESS;
  254. fw->context++;
  255. fw->size_received = 0;
  256. nd_cmd->status = 0;
  257. nd_cmd->context = fw->context;
  258. dev_dbg(dev, "%s: context issued: %#x\n", __func__, nd_cmd->context);
  259. return 0;
  260. }
  261. static int nd_intel_test_send_data(struct nfit_test *t,
  262. struct nd_intel_fw_send_data *nd_cmd, unsigned int buf_len,
  263. int idx)
  264. {
  265. struct device *dev = &t->pdev.dev;
  266. struct nfit_test_fw *fw = &t->fw[idx];
  267. u32 *status = (u32 *)&nd_cmd->data[nd_cmd->length];
  268. dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
  269. __func__, t, nd_cmd, buf_len, idx);
  270. if (buf_len < sizeof(*nd_cmd))
  271. return -EINVAL;
  272. dev_dbg(dev, "%s: cmd->status: %#x\n", __func__, *status);
  273. dev_dbg(dev, "%s: cmd->data[0]: %#x\n", __func__, nd_cmd->data[0]);
  274. dev_dbg(dev, "%s: cmd->data[%u]: %#x\n", __func__, nd_cmd->length-1,
  275. nd_cmd->data[nd_cmd->length-1]);
  276. if (fw->state != FW_STATE_IN_PROGRESS) {
  277. dev_dbg(dev, "%s: not in IN_PROGRESS state\n", __func__);
  278. *status = 0x5;
  279. return 0;
  280. }
  281. if (nd_cmd->context != fw->context) {
  282. dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
  283. __func__, nd_cmd->context, fw->context);
  284. *status = 0x10007;
  285. return 0;
  286. }
  287. /*
  288. * check offset + len > size of fw storage
  289. * check length is > max send length
  290. */
  291. if (nd_cmd->offset + nd_cmd->length > INTEL_FW_STORAGE_SIZE ||
  292. nd_cmd->length > INTEL_FW_MAX_SEND_LEN) {
  293. *status = 0x3;
  294. dev_dbg(dev, "%s: buffer boundary violation\n", __func__);
  295. return 0;
  296. }
  297. fw->size_received += nd_cmd->length;
  298. dev_dbg(dev, "%s: copying %u bytes, %u bytes so far\n",
  299. __func__, nd_cmd->length, fw->size_received);
  300. *status = 0;
  301. return 0;
  302. }
  303. static int nd_intel_test_finish_fw(struct nfit_test *t,
  304. struct nd_intel_fw_finish_update *nd_cmd,
  305. unsigned int buf_len, int idx)
  306. {
  307. struct device *dev = &t->pdev.dev;
  308. struct nfit_test_fw *fw = &t->fw[idx];
  309. dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
  310. __func__, t, nd_cmd, buf_len, idx);
  311. if (fw->state == FW_STATE_UPDATED) {
  312. /* update already done, need activation */
  313. nd_cmd->status = 0x20007;
  314. return 0;
  315. }
  316. dev_dbg(dev, "%s: context: %#x ctrl_flags: %#x\n",
  317. __func__, nd_cmd->context, nd_cmd->ctrl_flags);
  318. switch (nd_cmd->ctrl_flags) {
  319. case 0: /* finish */
  320. if (nd_cmd->context != fw->context) {
  321. dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
  322. __func__, nd_cmd->context,
  323. fw->context);
  324. nd_cmd->status = 0x10007;
  325. return 0;
  326. }
  327. nd_cmd->status = 0;
  328. fw->state = FW_STATE_VERIFY;
  329. /* set 1 second of time for firmware "update" */
  330. fw->end_time = jiffies + HZ;
  331. break;
  332. case 1: /* abort */
  333. fw->size_received = 0;
  334. /* successfully aborted status */
  335. nd_cmd->status = 0x40007;
  336. fw->state = FW_STATE_NEW;
  337. dev_dbg(dev, "%s: abort successful\n", __func__);
  338. break;
  339. default: /* bad control flag */
  340. dev_warn(dev, "%s: unknown control flag: %#x\n",
  341. __func__, nd_cmd->ctrl_flags);
  342. return -EINVAL;
  343. }
  344. return 0;
  345. }
/*
 * Emulate the Intel "query finish update" DSM: report the state of the
 * update session identified by nd_cmd->context.  Transitions VERIFY to
 * UPDATED once the emulated one-second verify window has elapsed.
 *
 * NOTE(review): fw->end_time is assigned from 32-bit 'jiffies' in
 * nd_intel_test_finish_fw() but compared here with
 * time_is_after_jiffies64(); confirm this is safe on 32-bit builds.
 */
static int nd_intel_test_finish_query(struct nfit_test *t,
		struct nd_intel_fw_finish_query *nd_cmd,
		unsigned int buf_len, int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	/* reject tokens from a stale or foreign session */
	if (nd_cmd->context != fw->context) {
		dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
				__func__, nd_cmd->context, fw->context);
		nd_cmd->status = 0x10007;
		return 0;
	}

	dev_dbg(dev, "%s context: %#x\n", __func__, nd_cmd->context);

	switch (fw->state) {
	case FW_STATE_NEW:
		/* no update performed in this session */
		nd_cmd->updated_fw_rev = 0;
		nd_cmd->status = 0;
		dev_dbg(dev, "%s: new state\n", __func__);
		break;

	case FW_STATE_IN_PROGRESS:
		/* sequencing error */
		nd_cmd->status = 0x40007;
		nd_cmd->updated_fw_rev = 0;
		dev_dbg(dev, "%s: sequence error\n", __func__);
		break;

	case FW_STATE_VERIFY:
		/* still inside the emulated verify window? */
		if (time_is_after_jiffies64(fw->end_time)) {
			nd_cmd->updated_fw_rev = 0;
			nd_cmd->status = 0x20007;
			dev_dbg(dev, "%s: still verifying\n", __func__);
			break;
		}
		/* verify window elapsed: promote to UPDATED and report */
		dev_dbg(dev, "%s: transition out verify\n", __func__);
		fw->state = FW_STATE_UPDATED;
		fw->missed_activate = false;
		fallthrough;

	case FW_STATE_UPDATED:
		nd_cmd->status = 0;
		/* bogus test version */
		fw->version = nd_cmd->updated_fw_rev =
			INTEL_FW_FAKE_VERSION;
		dev_dbg(dev, "%s: updated\n", __func__);
		break;

	default: /* we should never get here */
		return -EINVAL;
	}

	return 0;
}
  398. static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
  399. unsigned int buf_len)
  400. {
  401. if (buf_len < sizeof(*nd_cmd))
  402. return -EINVAL;
  403. nd_cmd->status = 0;
  404. nd_cmd->config_size = LABEL_SIZE;
  405. nd_cmd->max_xfer = SZ_4K;
  406. return 0;
  407. }
  408. static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
  409. *nd_cmd, unsigned int buf_len, void *label)
  410. {
  411. unsigned int len, offset = nd_cmd->in_offset;
  412. int rc;
  413. if (buf_len < sizeof(*nd_cmd))
  414. return -EINVAL;
  415. if (offset >= LABEL_SIZE)
  416. return -EINVAL;
  417. if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
  418. return -EINVAL;
  419. nd_cmd->status = 0;
  420. len = min(nd_cmd->in_length, LABEL_SIZE - offset);
  421. memcpy(nd_cmd->out_buf, label + offset, len);
  422. rc = buf_len - sizeof(*nd_cmd) - len;
  423. return rc;
  424. }
  425. static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
  426. unsigned int buf_len, void *label)
  427. {
  428. unsigned int len, offset = nd_cmd->in_offset;
  429. u32 *status;
  430. int rc;
  431. if (buf_len < sizeof(*nd_cmd))
  432. return -EINVAL;
  433. if (offset >= LABEL_SIZE)
  434. return -EINVAL;
  435. if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
  436. return -EINVAL;
  437. status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
  438. *status = 0;
  439. len = min(nd_cmd->in_length, LABEL_SIZE - offset);
  440. memcpy(label + offset, nd_cmd->in_buf, len);
  441. rc = buf_len - sizeof(*nd_cmd) - (len + 4);
  442. return rc;
  443. }
  444. #define NFIT_TEST_CLEAR_ERR_UNIT 256
  445. static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
  446. unsigned int buf_len)
  447. {
  448. int ars_recs;
  449. if (buf_len < sizeof(*nd_cmd))
  450. return -EINVAL;
  451. /* for testing, only store up to n records that fit within 4k */
  452. ars_recs = SZ_4K / sizeof(struct nd_ars_record);
  453. nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
  454. + ars_recs * sizeof(struct nd_ars_record);
  455. nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
  456. nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
  457. return 0;
  458. }
  459. static void post_ars_status(struct ars_state *ars_state,
  460. struct badrange *badrange, u64 addr, u64 len)
  461. {
  462. struct nd_cmd_ars_status *ars_status;
  463. struct nd_ars_record *ars_record;
  464. struct badrange_entry *be;
  465. u64 end = addr + len - 1;
  466. int i = 0;
  467. ars_state->deadline = jiffies + 1*HZ;
  468. ars_status = ars_state->ars_status;
  469. ars_status->status = 0;
  470. ars_status->address = addr;
  471. ars_status->length = len;
  472. ars_status->type = ND_ARS_PERSISTENT;
  473. spin_lock(&badrange->lock);
  474. list_for_each_entry(be, &badrange->list, list) {
  475. u64 be_end = be->start + be->length - 1;
  476. u64 rstart, rend;
  477. /* skip entries outside the range */
  478. if (be_end < addr || be->start > end)
  479. continue;
  480. rstart = (be->start < addr) ? addr : be->start;
  481. rend = (be_end < end) ? be_end : end;
  482. ars_record = &ars_status->records[i];
  483. ars_record->handle = 0;
  484. ars_record->err_address = rstart;
  485. ars_record->length = rend - rstart + 1;
  486. i++;
  487. }
  488. spin_unlock(&badrange->lock);
  489. ars_status->num_records = i;
  490. ars_status->out_length = sizeof(struct nd_cmd_ars_status)
  491. + i * sizeof(struct nd_ars_record);
  492. }
  493. static int nfit_test_cmd_ars_start(struct nfit_test *t,
  494. struct ars_state *ars_state,
  495. struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
  496. int *cmd_rc)
  497. {
  498. if (buf_len < sizeof(*ars_start))
  499. return -EINVAL;
  500. spin_lock(&ars_state->lock);
  501. if (time_before(jiffies, ars_state->deadline)) {
  502. ars_start->status = NFIT_ARS_START_BUSY;
  503. *cmd_rc = -EBUSY;
  504. } else {
  505. ars_start->status = 0;
  506. ars_start->scrub_time = 1;
  507. post_ars_status(ars_state, &t->badrange, ars_start->address,
  508. ars_start->length);
  509. *cmd_rc = 0;
  510. }
  511. spin_unlock(&ars_state->lock);
  512. return 0;
  513. }
/*
 * Emulate DSM "ARS Status": while the emulated scrub deadline has not
 * passed, report busy with an empty payload; afterwards copy out the
 * status and records cached by post_ars_status().
 */
static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
		int *cmd_rc)
{
	/*
	 * NOTE(review): out_length is read before taking the lock —
	 * presumably benign in this single-threaded test fixture; confirm.
	 */
	if (buf_len < ars_state->ars_status->out_length)
		return -EINVAL;
	spin_lock(&ars_state->lock);
	if (time_before(jiffies, ars_state->deadline)) {
		memset(ars_status, 0, buf_len);
		ars_status->status = NFIT_ARS_STATUS_BUSY;
		ars_status->out_length = sizeof(*ars_status);
		*cmd_rc = -EBUSY;
	} else {
		/* copy the cached results, records included */
		memcpy(ars_status, ars_state->ars_status,
				ars_state->ars_status->out_length);
		*cmd_rc = 0;
	}
	spin_unlock(&ars_state->lock);
	return 0;
}
  534. static int nfit_test_cmd_clear_error(struct nfit_test *t,
  535. struct nd_cmd_clear_error *clear_err,
  536. unsigned int buf_len, int *cmd_rc)
  537. {
  538. const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
  539. if (buf_len < sizeof(*clear_err))
  540. return -EINVAL;
  541. if ((clear_err->address & mask) || (clear_err->length & mask))
  542. return -EINVAL;
  543. badrange_forget(&t->badrange, clear_err->address, clear_err->length);
  544. clear_err->status = 0;
  545. clear_err->cleared = clear_err->length;
  546. *cmd_rc = 0;
  547. return 0;
  548. }
/* Context for walking bus children to find the region containing @addr. */
struct region_search_spa {
	u64 addr;	/* system physical address being searched for */
	struct nd_region *region; /* out: region covering @addr, if found */
};
  553. static int is_region_device(struct device *dev)
  554. {
  555. return !strncmp(dev->kobj.name, "region", 6);
  556. }
  557. static int nfit_test_search_region_spa(struct device *dev, void *data)
  558. {
  559. struct region_search_spa *ctx = data;
  560. struct nd_region *nd_region;
  561. resource_size_t ndr_end;
  562. if (!is_region_device(dev))
  563. return 0;
  564. nd_region = to_nd_region(dev);
  565. ndr_end = nd_region->ndr_start + nd_region->ndr_size;
  566. if (ctx->addr >= nd_region->ndr_start && ctx->addr < ndr_end) {
  567. ctx->region = nd_region;
  568. return 1;
  569. }
  570. return 0;
  571. }
/*
 * Translate a system physical address into a (dimm handle, dpa) pair.
 * Walks the bus for the containing region and then, for test purposes,
 * always attributes the translation to the region's last mapping.
 * Returns -ENODEV when no region covers @spa->spa.
 */
static int nfit_test_search_spa(struct nvdimm_bus *bus,
		struct nd_cmd_translate_spa *spa)
{
	int ret;
	struct nd_region *nd_region = NULL;
	struct nvdimm *nvdimm = NULL;
	struct nd_mapping *nd_mapping = NULL;
	struct region_search_spa ctx = {
		.addr = spa->spa,
		.region = NULL,
	};
	u64 dpa;

	ret = device_for_each_child(&bus->dev, &ctx,
			nfit_test_search_region_spa);

	if (!ret)
		return -ENODEV;

	nd_region = ctx.region;

	/* dpa is the offset of the address within the containing region */
	dpa = ctx.addr - nd_region->ndr_start;

	/*
	 * last dimm is selected for test
	 */
	nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
	nvdimm = nd_mapping->nvdimm;

	spa->devices[0].nfit_device_handle = handle[nvdimm->id];
	spa->num_nvdimms = 1;
	spa->devices[0].dpa = dpa;

	return 0;
}
  600. static int nfit_test_cmd_translate_spa(struct nvdimm_bus *bus,
  601. struct nd_cmd_translate_spa *spa, unsigned int buf_len)
  602. {
  603. if (buf_len < spa->translate_length)
  604. return -EINVAL;
  605. if (nfit_test_search_spa(bus, spa) < 0 || !spa->num_nvdimms)
  606. spa->status = 2;
  607. return 0;
  608. }
  609. static int nfit_test_cmd_smart(struct nd_intel_smart *smart, unsigned int buf_len,
  610. struct nd_intel_smart *smart_data)
  611. {
  612. if (buf_len < sizeof(*smart))
  613. return -EINVAL;
  614. memcpy(smart, smart_data, sizeof(*smart));
  615. return 0;
  616. }
  617. static int nfit_test_cmd_smart_threshold(
  618. struct nd_intel_smart_threshold *out,
  619. unsigned int buf_len,
  620. struct nd_intel_smart_threshold *smart_t)
  621. {
  622. if (buf_len < sizeof(*smart_t))
  623. return -EINVAL;
  624. memcpy(out, smart_t, sizeof(*smart_t));
  625. return 0;
  626. }
/*
 * Fire the 0x81 health-event ACPI notification on the dimm device when
 * the current SMART data trips any enabled alarm threshold, reports a
 * non-optimal health value, or records an unsafe shutdown.
 */
static void smart_notify(struct device *bus_dev,
		struct device *dimm_dev, struct nd_intel_smart *smart,
		struct nd_intel_smart_threshold *thresh)
{
	dev_dbg(dimm_dev, "%s: alarm: %#x spares: %d (%d) mtemp: %d (%d) ctemp: %d (%d)\n",
			__func__, thresh->alarm_control, thresh->spares,
			smart->spares, thresh->media_temperature,
			smart->media_temperature, thresh->ctrl_temperature,
			smart->ctrl_temperature);
	/* spares trip at-or-below threshold, temperatures at-or-above */
	if (((thresh->alarm_control & ND_INTEL_SMART_SPARE_TRIP)
				&& smart->spares
				<= thresh->spares)
			|| ((thresh->alarm_control & ND_INTEL_SMART_TEMP_TRIP)
				&& smart->media_temperature
				>= thresh->media_temperature)
			|| ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP)
				&& smart->ctrl_temperature
				>= thresh->ctrl_temperature)
			|| (smart->health != ND_INTEL_SMART_NON_CRITICAL_HEALTH)
			|| (smart->shutdown_state != 0)) {
		device_lock(bus_dev);
		__acpi_nvdimm_notify(dimm_dev, 0x81);
		device_unlock(bus_dev);
	}
}
/*
 * Emulate the Intel "set SMART threshold" command: copy the new settings
 * into the per-dimm threshold state and re-evaluate alarms, since a new
 * threshold may immediately trip.
 */
static int nfit_test_cmd_smart_set_threshold(
		struct nd_intel_smart_set_threshold *in,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *thresh,
		struct nd_intel_smart *smart,
		struct device *bus_dev, struct device *dimm_dev)
{
	unsigned int size;

	/*
	 * Copy everything but the last 4 bytes of the input — presumably
	 * the trailing output-only status field; confirm against the
	 * nd_intel_smart_set_threshold layout.
	 */
	size = sizeof(*in) - 4;
	if (buf_len < size)
		return -EINVAL;
	memcpy(thresh->data, in, size);
	in->status = 0;
	smart_notify(bus_dev, dimm_dev, smart, thresh);

	return 0;
}
  668. static int nfit_test_cmd_smart_inject(
  669. struct nd_intel_smart_inject *inj,
  670. unsigned int buf_len,
  671. struct nd_intel_smart_threshold *thresh,
  672. struct nd_intel_smart *smart,
  673. struct device *bus_dev, struct device *dimm_dev)
  674. {
  675. if (buf_len != sizeof(*inj))
  676. return -EINVAL;
  677. if (inj->flags & ND_INTEL_SMART_INJECT_MTEMP) {
  678. if (inj->mtemp_enable)
  679. smart->media_temperature = inj->media_temperature;
  680. else
  681. smart->media_temperature = smart_def.media_temperature;
  682. }
  683. if (inj->flags & ND_INTEL_SMART_INJECT_SPARE) {
  684. if (inj->spare_enable)
  685. smart->spares = inj->spares;
  686. else
  687. smart->spares = smart_def.spares;
  688. }
  689. if (inj->flags & ND_INTEL_SMART_INJECT_FATAL) {
  690. if (inj->fatal_enable)
  691. smart->health = ND_INTEL_SMART_FATAL_HEALTH;
  692. else
  693. smart->health = ND_INTEL_SMART_NON_CRITICAL_HEALTH;
  694. }
  695. if (inj->flags & ND_INTEL_SMART_INJECT_SHUTDOWN) {
  696. if (inj->unsafe_shutdown_enable) {
  697. smart->shutdown_state = 1;
  698. smart->shutdown_count++;
  699. } else
  700. smart->shutdown_state = 0;
  701. }
  702. inj->status = 0;
  703. smart_notify(bus_dev, dimm_dev, smart, thresh);
  704. return 0;
  705. }
  706. static void uc_error_notify(struct work_struct *work)
  707. {
  708. struct nfit_test *t = container_of(work, typeof(*t), work);
  709. __acpi_nfit_notify(&t->pdev.dev, t, NFIT_NOTIFY_UC_MEMORY_ERROR);
  710. }
/*
 * Emulate the vendor "inject ARS error" command: validate the payload,
 * add the requested SPA range to the emulated bad-range list, and
 * optionally schedule an uncorrectable-error notification.  All error
 * paths also report NFIT_ARS_INJECT_INVALID in the command status.
 */
static int nfit_test_cmd_ars_error_inject(struct nfit_test *t,
		struct nd_cmd_ars_err_inj *err_inj, unsigned int buf_len)
{
	int rc;

	if (buf_len != sizeof(*err_inj)) {
		rc = -EINVAL;
		goto err;
	}

	/* a zero-length injection range is rejected */
	if (err_inj->err_inj_spa_range_length <= 0) {
		rc = -EINVAL;
		goto err;
	}

	rc = badrange_add(&t->badrange, err_inj->err_inj_spa_range_base,
			err_inj->err_inj_spa_range_length);
	if (rc < 0)
		goto err;

	/* deliver the async UC-memory-error notification if requested */
	if (err_inj->err_inj_options & (1 << ND_ARS_ERR_INJ_OPT_NOTIFY))
		queue_work(nfit_wq, &t->work);

	err_inj->status = 0;
	return 0;

err:
	err_inj->status = NFIT_ARS_INJECT_INVALID;
	return rc;
}
  735. static int nfit_test_cmd_ars_inject_clear(struct nfit_test *t,
  736. struct nd_cmd_ars_err_inj_clr *err_clr, unsigned int buf_len)
  737. {
  738. int rc;
  739. if (buf_len != sizeof(*err_clr)) {
  740. rc = -EINVAL;
  741. goto err;
  742. }
  743. if (err_clr->err_inj_clr_spa_range_length <= 0) {
  744. rc = -EINVAL;
  745. goto err;
  746. }
  747. badrange_forget(&t->badrange, err_clr->err_inj_clr_spa_range_base,
  748. err_clr->err_inj_clr_spa_range_length);
  749. err_clr->status = 0;
  750. return 0;
  751. err:
  752. err_clr->status = NFIT_ARS_INJECT_INVALID;
  753. return rc;
  754. }
  755. static int nfit_test_cmd_ars_inject_status(struct nfit_test *t,
  756. struct nd_cmd_ars_err_inj_stat *err_stat,
  757. unsigned int buf_len)
  758. {
  759. struct badrange_entry *be;
  760. int max = SZ_4K / sizeof(struct nd_error_stat_query_record);
  761. int i = 0;
  762. err_stat->status = 0;
  763. spin_lock(&t->badrange.lock);
  764. list_for_each_entry(be, &t->badrange.list, list) {
  765. err_stat->record[i].err_inj_stat_spa_range_base = be->start;
  766. err_stat->record[i].err_inj_stat_spa_range_length = be->length;
  767. i++;
  768. if (i > max)
  769. break;
  770. }
  771. spin_unlock(&t->badrange.lock);
  772. err_stat->inj_err_rec_count = i;
  773. return 0;
  774. }
  775. static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t,
  776. struct nd_intel_lss *nd_cmd, unsigned int buf_len)
  777. {
  778. struct device *dev = &t->pdev.dev;
  779. if (buf_len < sizeof(*nd_cmd))
  780. return -EINVAL;
  781. switch (nd_cmd->enable) {
  782. case 0:
  783. nd_cmd->status = 0;
  784. dev_dbg(dev, "%s: Latch System Shutdown Status disabled\n",
  785. __func__);
  786. break;
  787. case 1:
  788. nd_cmd->status = 0;
  789. dev_dbg(dev, "%s: Latch System Shutdown Status enabled\n",
  790. __func__);
  791. break;
  792. default:
  793. dev_warn(dev, "Unknown enable value: %#x\n", nd_cmd->enable);
  794. nd_cmd->status = 0x3;
  795. break;
  796. }
  797. return 0;
  798. }
  799. static int override_return_code(int dimm, unsigned int func, int rc)
  800. {
  801. if ((1 << func) & dimm_fail_cmd_flags[dimm]) {
  802. if (dimm_fail_cmd_code[dimm])
  803. return dimm_fail_cmd_code[dimm];
  804. return -EIO;
  805. }
  806. return rc;
  807. }
  808. static int nd_intel_test_cmd_security_status(struct nfit_test *t,
  809. struct nd_intel_get_security_state *nd_cmd,
  810. unsigned int buf_len, int dimm)
  811. {
  812. struct device *dev = &t->pdev.dev;
  813. struct nfit_test_sec *sec = &dimm_sec_info[dimm];
  814. nd_cmd->status = 0;
  815. nd_cmd->state = sec->state;
  816. nd_cmd->extended_state = sec->ext_state;
  817. dev_dbg(dev, "security state (%#x) returned\n", nd_cmd->state);
  818. return 0;
  819. }
/*
 * Emulate the Intel "unlock unit" command.  Unlock is only valid when
 * the dimm is locked and not frozen, and the supplied passphrase must
 * match; success leaves security enabled but unlocked.
 */
static int nd_intel_test_cmd_unlock_unit(struct nfit_test *t,
		struct nd_intel_unlock_unit *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (!(sec->state & ND_INTEL_SEC_STATE_LOCKED) ||
			(sec->state & ND_INTEL_SEC_STATE_FROZEN)) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "unlock unit: invalid state: %#x\n",
				sec->state);
	} else if (memcmp(nd_cmd->passphrase, sec->passphrase,
			ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "unlock unit: invalid passphrase\n");
	} else {
		nd_cmd->status = 0;
		/* drop the LOCKED bit, keep security enabled */
		sec->state = ND_INTEL_SEC_STATE_ENABLED;
		dev_dbg(dev, "Unit unlocked\n");
	}

	dev_dbg(dev, "unlocking status returned: %#x\n", nd_cmd->status);
	return 0;
}
/*
 * Emulate the Intel "set passphrase" command: rejected while frozen or
 * when the old passphrase does not match; otherwise install the new
 * passphrase and enable security.
 */
static int nd_intel_test_cmd_set_pass(struct nfit_test *t,
		struct nd_intel_set_passphrase *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (sec->state & ND_INTEL_SEC_STATE_FROZEN) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "set passphrase: wrong security state\n");
	} else if (memcmp(nd_cmd->old_pass, sec->passphrase,
			ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "set passphrase: wrong passphrase\n");
	} else {
		memcpy(sec->passphrase, nd_cmd->new_pass,
				ND_INTEL_PASSPHRASE_SIZE);
		sec->state |= ND_INTEL_SEC_STATE_ENABLED;
		nd_cmd->status = 0;
		dev_dbg(dev, "passphrase updated\n");
	}

	return 0;
}
  865. static int nd_intel_test_cmd_freeze_lock(struct nfit_test *t,
  866. struct nd_intel_freeze_lock *nd_cmd,
  867. unsigned int buf_len, int dimm)
  868. {
  869. struct device *dev = &t->pdev.dev;
  870. struct nfit_test_sec *sec = &dimm_sec_info[dimm];
  871. if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED)) {
  872. nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
  873. dev_dbg(dev, "freeze lock: wrong security state\n");
  874. } else {
  875. sec->state |= ND_INTEL_SEC_STATE_FROZEN;
  876. nd_cmd->status = 0;
  877. dev_dbg(dev, "security frozen\n");
  878. }
  879. return 0;
  880. }
/*
 * Emulate the Intel "disable passphrase" command: requires security
 * enabled and not frozen, plus a matching passphrase; on success the
 * passphrase and the entire security state are cleared.
 */
static int nd_intel_test_cmd_disable_pass(struct nfit_test *t,
		struct nd_intel_disable_passphrase *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED) ||
			(sec->state & ND_INTEL_SEC_STATE_FROZEN)) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "disable passphrase: wrong security state\n");
	} else if (memcmp(nd_cmd->passphrase, sec->passphrase,
			ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "disable passphrase: wrong passphrase\n");
	} else {
		memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
		sec->state = 0;
		/*
		 * NOTE(review): status is not set to 0 on this success
		 * path — presumably callers pre-zero the buffer; confirm.
		 */
		dev_dbg(dev, "disable passphrase: done\n");
	}

	return 0;
}
/*
 * Emulate the Intel "secure erase" command.  Rejected while frozen or on
 * a passphrase mismatch; when security is disabled only the all-zeroes
 * key is accepted.  Success wipes both the user and master passphrases
 * and resets the security state.
 */
static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
		struct nd_intel_secure_erase *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (sec->state & ND_INTEL_SEC_STATE_FROZEN) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "secure erase: wrong security state\n");
	} else if (memcmp(nd_cmd->passphrase, sec->passphrase,
			ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "secure erase: wrong passphrase\n");
	} else {
		/*
		 * Security-disabled dimms only accept the zero key.
		 * NOTE(review): this early return leaves nd_cmd->status
		 * untouched — presumably callers pre-zero it; confirm.
		 */
		if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED)
				&& (memcmp(nd_cmd->passphrase, zero_key,
					ND_INTEL_PASSPHRASE_SIZE) != 0)) {
			dev_dbg(dev, "invalid zero key\n");
			return 0;
		}
		memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
		memset(sec->master_passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
		sec->state = 0;
		sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
		dev_dbg(dev, "secure erase: done\n");
	}

	return 0;
}
/*
 * Emulate the Intel "overwrite" command: start a simulated overwrite
 * that completes five seconds from now (polled by the query-overwrite
 * handler).  When security is enabled the passphrase must match.
 */
static int nd_intel_test_cmd_overwrite(struct nfit_test *t,
		struct nd_intel_overwrite *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if ((sec->state & ND_INTEL_SEC_STATE_ENABLED) &&
			memcmp(nd_cmd->passphrase, sec->passphrase,
				ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "overwrite: wrong passphrase\n");
		return 0;
	}

	/* stash the pre-overwrite state so query-overwrite can restore it */
	sec->old_state = sec->state;
	sec->state = ND_INTEL_SEC_STATE_OVERWRITE;
	dev_dbg(dev, "overwrite progressing.\n");
	sec->overwrite_end_time = get_jiffies_64() + 5 * HZ;

	return 0;
}
/*
 * Emulate the Intel "query overwrite" command: sequence error when no
 * overwrite is in flight; in-progress until the simulated five-second
 * window elapses; then restore the saved pre-overwrite state.
 */
static int nd_intel_test_cmd_query_overwrite(struct nfit_test *t,
		struct nd_intel_query_overwrite *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (!(sec->state & ND_INTEL_SEC_STATE_OVERWRITE)) {
		nd_cmd->status = ND_INTEL_STATUS_OQUERY_SEQUENCE_ERR;
		return 0;
	}

	if (time_is_before_jiffies64(sec->overwrite_end_time)) {
		/* window elapsed: overwrite "completes" now */
		sec->overwrite_end_time = 0;
		sec->state = sec->old_state;
		sec->old_state = 0;
		sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
		dev_dbg(dev, "overwrite is complete\n");
	} else
		nd_cmd->status = ND_INTEL_STATUS_OQUERY_INPROGRESS;
	return 0;
}
/*
 * Emulate the Intel "set master passphrase" command: requires the
 * extended (master) security state to be enabled and not
 * passphrase-limited, plus a matching old master passphrase.
 */
static int nd_intel_test_cmd_master_set_pass(struct nfit_test *t,
		struct nd_intel_set_master_passphrase *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (!(sec->ext_state & ND_INTEL_SEC_ESTATE_ENABLED)) {
		nd_cmd->status = ND_INTEL_STATUS_NOT_SUPPORTED;
		dev_dbg(dev, "master set passphrase: in wrong state\n");
	} else if (sec->ext_state & ND_INTEL_SEC_ESTATE_PLIMIT) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "master set passphrase: in wrong security state\n");
	} else if (memcmp(nd_cmd->old_pass, sec->master_passphrase,
			ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "master set passphrase: wrong passphrase\n");
	} else {
		memcpy(sec->master_passphrase, nd_cmd->new_pass,
				ND_INTEL_PASSPHRASE_SIZE);
		sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
		dev_dbg(dev, "master passphrase: updated\n");
	}

	return 0;
}
/*
 * Emulate the Intel "master secure erase" command: same extended-state
 * and master-passphrase checks as master-set-passphrase; success wipes
 * the user passphrase and state but keeps the master passphrase.
 */
static int nd_intel_test_cmd_master_secure_erase(struct nfit_test *t,
		struct nd_intel_master_secure_erase *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_sec *sec = &dimm_sec_info[dimm];

	if (!(sec->ext_state & ND_INTEL_SEC_ESTATE_ENABLED)) {
		nd_cmd->status = ND_INTEL_STATUS_NOT_SUPPORTED;
		dev_dbg(dev, "master secure erase: in wrong state\n");
	} else if (sec->ext_state & ND_INTEL_SEC_ESTATE_PLIMIT) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
		dev_dbg(dev, "master secure erase: in wrong security state\n");
	} else if (memcmp(nd_cmd->passphrase, sec->master_passphrase,
			ND_INTEL_PASSPHRASE_SIZE) != 0) {
		nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
		dev_dbg(dev, "master secure erase: wrong passphrase\n");
	} else {
		/* we do not erase master state passphrase ever */
		sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
		memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
		sec->state = 0;
		dev_dbg(dev, "master secure erase: done\n");
	}

	return 0;
}
/* jiffies timestamp of the most recent emulated firmware activation */
static unsigned long last_activate;
/*
 * Emulate the bus-scope "firmware activate businfo" command.  The state
 * reported is ARMED when any dimm is armed, BUSY within three seconds of
 * the last activation, and IDLE otherwise; the quiesce timeouts scale
 * with the number of armed dimms (one emulated second each).
 */
static int nvdimm_bus_intel_fw_activate_businfo(struct nfit_test *t,
		struct nd_intel_bus_fw_activate_businfo *nd_cmd,
		unsigned int buf_len)
{
	int i, armed = 0;
	int state;
	u64 tmo;

	/* count dimms currently armed for activation */
	for (i = 0; i < NUM_DCR; i++) {
		struct nfit_test_fw *fw = &t->fw[i];

		if (fw->armed)
			armed++;
	}

	/*
	 * Emulate 3 second activation max, and 1 second incremental
	 * quiesce time per dimm requiring multiple activates to get all
	 * DIMMs updated.
	 */
	if (armed)
		state = ND_INTEL_FWA_ARMED;
	else if (!last_activate || time_after(jiffies, last_activate + 3 * HZ))
		state = ND_INTEL_FWA_IDLE;
	else
		state = ND_INTEL_FWA_BUSY;

	tmo = armed * USEC_PER_SEC;
	*nd_cmd = (struct nd_intel_bus_fw_activate_businfo) {
		.capability = ND_INTEL_BUS_FWA_CAP_FWQUIESCE
			| ND_INTEL_BUS_FWA_CAP_OSQUIESCE
			| ND_INTEL_BUS_FWA_CAP_RESET,
		.state = state,
		.activate_tmo = tmo,
		.cpu_quiesce_tmo = tmo,
		.io_quiesce_tmo = tmo,
		.max_quiesce_tmo = 3 * USEC_PER_SEC,
	};

	return 0;
}
/*
 * Emulate the bus-scope "firmware activate" command.  Refuses while an
 * activation is in flight (BUSY) or when nothing is armed (NOARM);
 * otherwise — including the over-timeout case, which is reported but
 * still proceeds — consume each armed dimm: staged firmware becomes
 * live, unstaged arms record a missed activate.
 */
static int nvdimm_bus_intel_fw_activate(struct nfit_test *t,
		struct nd_intel_bus_fw_activate *nd_cmd,
		unsigned int buf_len)
{
	struct nd_intel_bus_fw_activate_businfo info;
	u32 status = 0;
	int i;

	nvdimm_bus_intel_fw_activate_businfo(t, &info, sizeof(info));
	if (info.state == ND_INTEL_FWA_BUSY)
		status = ND_INTEL_BUS_FWA_STATUS_BUSY;
	else if (info.activate_tmo > info.max_quiesce_tmo)
		status = ND_INTEL_BUS_FWA_STATUS_TMO;
	else if (info.state == ND_INTEL_FWA_IDLE)
		status = ND_INTEL_BUS_FWA_STATUS_NOARM;

	dev_dbg(&t->pdev.dev, "status: %d\n", status);
	nd_cmd->status = status;
	/* TMO is reported to the caller but does not abort the activation */
	if (status && status != ND_INTEL_BUS_FWA_STATUS_TMO)
		return 0;

	last_activate = jiffies;
	for (i = 0; i < NUM_DCR; i++) {
		struct nfit_test_fw *fw = &t->fw[i];

		if (!fw->armed)
			continue;
		/* armed without staged firmware counts as a missed activate */
		if (fw->state != FW_STATE_UPDATED)
			fw->missed_activate = true;
		else
			fw->state = FW_STATE_NEW;
		fw->armed = false;
		fw->last_activate = last_activate;
	}

	return 0;
}
/*
 * Emulate the per-dimm "firmware activate dimminfo" command.  The dimm
 * state mirrors the bus state (busy/idle) or this dimm's armed flag, and
 * the result reflects whether the most recent bus-level activation
 * included this dimm and whether its firmware was actually staged.
 */
static int nd_intel_test_cmd_fw_activate_dimminfo(struct nfit_test *t,
		struct nd_intel_fw_activate_dimminfo *nd_cmd,
		unsigned int buf_len, int dimm)
{
	struct nd_intel_bus_fw_activate_businfo info;
	struct nfit_test_fw *fw = &t->fw[dimm];
	u32 result, state;

	nvdimm_bus_intel_fw_activate_businfo(t, &info, sizeof(info));

	if (info.state == ND_INTEL_FWA_BUSY)
		state = ND_INTEL_FWA_BUSY;
	else if (info.state == ND_INTEL_FWA_IDLE)
		state = ND_INTEL_FWA_IDLE;
	else if (fw->armed)
		state = ND_INTEL_FWA_ARMED;
	else
		state = ND_INTEL_FWA_IDLE;

	result = ND_INTEL_DIMM_FWA_NONE;
	/* only report a result once the last activation has settled */
	if (last_activate && fw->last_activate == last_activate &&
			state == ND_INTEL_FWA_IDLE) {
		if (fw->missed_activate)
			result = ND_INTEL_DIMM_FWA_NOTSTAGED;
		else
			result = ND_INTEL_DIMM_FWA_SUCCESS;
	}

	*nd_cmd = (struct nd_intel_fw_activate_dimminfo) {
		.result = result,
		.state = state,
	};

	return 0;
}
  1117. static int nd_intel_test_cmd_fw_activate_arm(struct nfit_test *t,
  1118. struct nd_intel_fw_activate_arm *nd_cmd,
  1119. unsigned int buf_len, int dimm)
  1120. {
  1121. struct nfit_test_fw *fw = &t->fw[dimm];
  1122. fw->armed = nd_cmd->activate_arm == ND_INTEL_DIMM_FWA_ARM;
  1123. nd_cmd->status = 0;
  1124. return 0;
  1125. }
  1126. static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
  1127. {
  1128. int i;
  1129. /* lookup per-dimm data */
  1130. for (i = 0; i < ARRAY_SIZE(handle); i++)
  1131. if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i])
  1132. break;
  1133. if (i >= ARRAY_SIZE(handle))
  1134. return -ENXIO;
  1135. return i;
  1136. }
  1137. static void nfit_ctl_dbg(struct acpi_nfit_desc *acpi_desc,
  1138. struct nvdimm *nvdimm, unsigned int cmd, void *buf,
  1139. unsigned int len)
  1140. {
  1141. struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
  1142. unsigned int func = cmd;
  1143. unsigned int family = 0;
  1144. if (cmd == ND_CMD_CALL) {
  1145. struct nd_cmd_pkg *pkg = buf;
  1146. len = pkg->nd_size_in;
  1147. family = pkg->nd_family;
  1148. buf = pkg->nd_payload;
  1149. func = pkg->nd_command;
  1150. }
  1151. dev_dbg(&t->pdev.dev, "%s family: %d cmd: %d: func: %d input length: %d\n",
  1152. nvdimm ? nvdimm_name(nvdimm) : "bus", family, cmd, func,
  1153. len);
  1154. print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 16, 4,
  1155. buf, min(len, 256u), true);
  1156. }
/*
 * Top-level command dispatcher for the emulated nvdimm bus.  Dimm-scope
 * ND_CMD_CALL packages are routed by nfit family/function (Intel
 * security, firmware update/activate, SMART); raw dimm commands handle
 * label config; bus-scope calls are routed by family (NFIT injection,
 * Intel firmware activate) or raw ARS/clear-error command number.
 * Per-dimm results pass through override_return_code() so tests can
 * inject command failures.
 */
static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	unsigned int func = cmd;
	int i, rc = 0, __cmd_rc;

	/* allow callers that do not care about the firmware status code */
	if (!cmd_rc)
		cmd_rc = &__cmd_rc;
	*cmd_rc = 0;

	nfit_ctl_dbg(acpi_desc, nvdimm, cmd, buf, buf_len);

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);

		if (!nfit_mem)
			return -ENOTTY;

		if (cmd == ND_CMD_CALL) {
			/* unwrap the passthrough package */
			struct nd_cmd_pkg *call_pkg = buf;

			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;
			func = call_pkg->nd_command;

			if (call_pkg->nd_family != nfit_mem->family)
				return -ENOTTY;

			i = get_dimm(nfit_mem, func);
			if (i < 0)
				return i;
			if (i >= NUM_DCR) {
				dev_WARN_ONCE(&t->pdev.dev, 1,
					"ND_CMD_CALL only valid for nfit_test0\n");
				return -EINVAL;
			}

			switch (func) {
			case NVDIMM_INTEL_GET_SECURITY_STATE:
				rc = nd_intel_test_cmd_security_status(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_UNLOCK_UNIT:
				rc = nd_intel_test_cmd_unlock_unit(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_SET_PASSPHRASE:
				rc = nd_intel_test_cmd_set_pass(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_DISABLE_PASSPHRASE:
				rc = nd_intel_test_cmd_disable_pass(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_FREEZE_LOCK:
				rc = nd_intel_test_cmd_freeze_lock(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_SECURE_ERASE:
				rc = nd_intel_test_cmd_secure_erase(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_OVERWRITE:
				rc = nd_intel_test_cmd_overwrite(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_QUERY_OVERWRITE:
				rc = nd_intel_test_cmd_query_overwrite(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_SET_MASTER_PASSPHRASE:
				rc = nd_intel_test_cmd_master_set_pass(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_MASTER_SECURE_ERASE:
				rc = nd_intel_test_cmd_master_secure_erase(t,
						buf, buf_len, i);
				break;
			case NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO:
				rc = nd_intel_test_cmd_fw_activate_dimminfo(
					t, buf, buf_len, i);
				break;
			case NVDIMM_INTEL_FW_ACTIVATE_ARM:
				rc = nd_intel_test_cmd_fw_activate_arm(
					t, buf, buf_len, i);
				break;
			case ND_INTEL_ENABLE_LSS_STATUS:
				rc = nd_intel_test_cmd_set_lss_status(t,
						buf, buf_len);
				break;
			case ND_INTEL_FW_GET_INFO:
				rc = nd_intel_test_get_fw_info(t, buf,
						buf_len, i);
				break;
			case ND_INTEL_FW_START_UPDATE:
				rc = nd_intel_test_start_update(t, buf,
						buf_len, i);
				break;
			case ND_INTEL_FW_SEND_DATA:
				rc = nd_intel_test_send_data(t, buf,
						buf_len, i);
				break;
			case ND_INTEL_FW_FINISH_UPDATE:
				rc = nd_intel_test_finish_fw(t, buf,
						buf_len, i);
				break;
			case ND_INTEL_FW_FINISH_QUERY:
				rc = nd_intel_test_finish_query(t, buf,
						buf_len, i);
				break;
			case ND_INTEL_SMART:
				rc = nfit_test_cmd_smart(buf, buf_len,
						&t->smart[i]);
				break;
			case ND_INTEL_SMART_THRESHOLD:
				rc = nfit_test_cmd_smart_threshold(buf,
						buf_len,
						&t->smart_threshold[i]);
				break;
			case ND_INTEL_SMART_SET_THRESHOLD:
				rc = nfit_test_cmd_smart_set_threshold(buf,
						buf_len,
						&t->smart_threshold[i],
						&t->smart[i],
						&t->pdev.dev, t->dimm_dev[i]);
				break;
			case ND_INTEL_SMART_INJECT:
				rc = nfit_test_cmd_smart_inject(buf,
						buf_len,
						&t->smart_threshold[i],
						&t->smart[i],
						&t->pdev.dev, t->dimm_dev[i]);
				break;
			default:
				return -ENOTTY;
			}
			/* apply per-dimm failure injection, if configured */
			return override_return_code(i, func, rc);
		}

		/* raw (non-packaged) dimm commands: label config space */
		if (!test_bit(cmd, &cmd_mask)
				|| !test_bit(func, &nfit_mem->dsm_mask))
			return -ENOTTY;

		i = get_dimm(nfit_mem, func);
		if (i < 0)
			return i;

		switch (func) {
		case ND_CMD_GET_CONFIG_SIZE:
			rc = nfit_test_cmd_get_config_size(buf, buf_len);
			break;
		case ND_CMD_GET_CONFIG_DATA:
			rc = nfit_test_cmd_get_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		case ND_CMD_SET_CONFIG_DATA:
			rc = nfit_test_cmd_set_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		default:
			return -ENOTTY;
		}
		return override_return_code(i, func, rc);
	} else {
		/* bus-scope commands */
		struct ars_state *ars_state = &t->ars_state;
		struct nd_cmd_pkg *call_pkg = buf;

		if (!nd_desc)
			return -ENOTTY;

		if (cmd == ND_CMD_CALL && call_pkg->nd_family
				== NVDIMM_BUS_FAMILY_NFIT) {
			func = call_pkg->nd_command;
			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;

			switch (func) {
			case NFIT_CMD_TRANSLATE_SPA:
				rc = nfit_test_cmd_translate_spa(
					acpi_desc->nvdimm_bus, buf, buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_SET:
				rc = nfit_test_cmd_ars_error_inject(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_CLEAR:
				rc = nfit_test_cmd_ars_inject_clear(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_GET:
				rc = nfit_test_cmd_ars_inject_status(t, buf,
					buf_len);
				return rc;
			default:
				return -ENOTTY;
			}
		} else if (cmd == ND_CMD_CALL && call_pkg->nd_family
				== NVDIMM_BUS_FAMILY_INTEL) {
			func = call_pkg->nd_command;
			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;

			switch (func) {
			case NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO:
				rc = nvdimm_bus_intel_fw_activate_businfo(t,
						buf, buf_len);
				return rc;
			case NVDIMM_BUS_INTEL_FW_ACTIVATE:
				rc = nvdimm_bus_intel_fw_activate(t, buf,
						buf_len);
				return rc;
			default:
				return -ENOTTY;
			}
		} else if (cmd == ND_CMD_CALL)
			/* unrecognized bus family */
			return -ENOTTY;

		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
			return -ENOTTY;

		switch (func) {
		case ND_CMD_ARS_CAP:
			rc = nfit_test_cmd_ars_cap(buf, buf_len);
			break;
		case ND_CMD_ARS_START:
			rc = nfit_test_cmd_ars_start(t, ars_state, buf,
					buf_len, cmd_rc);
			break;
		case ND_CMD_ARS_STATUS:
			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
					cmd_rc);
			break;
		case ND_CMD_CLEAR_ERROR:
			rc = nfit_test_cmd_clear_error(t, buf, buf_len, cmd_rc);
			break;
		default:
			return -ENOTTY;
		}
	}

	return rc;
}
/* protects every per-instance t->resources list */
static DEFINE_SPINLOCK(nfit_test_lock);
/* all registered nfit_test platform-device instances */
static struct nfit_test *instances[NUM_NFITS];
  1386. static void release_nfit_res(void *data)
  1387. {
  1388. struct nfit_test_resource *nfit_res = data;
  1389. spin_lock(&nfit_test_lock);
  1390. list_del(&nfit_res->list);
  1391. spin_unlock(&nfit_test_lock);
  1392. if (resource_size(&nfit_res->res) >= DIMM_SIZE)
  1393. gen_pool_free(nfit_pool, nfit_res->res.start,
  1394. resource_size(&nfit_res->res));
  1395. vfree(nfit_res->buf);
  1396. kfree(nfit_res);
  1397. }
/*
 * Register a previously vmalloc'ed buffer (with simulated bus address
 * *dma) as a tracked test resource, torn down automatically via devm.
 * On failure both the buffer and any pool-backed address range are
 * released here.  Returns the zeroed buffer on success, NULL on failure.
 */
static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
		void *buf)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
			GFP_KERNEL);
	int rc;

	/* *dma == 0 means the caller's address allocation failed */
	if (!buf || !nfit_res || !*dma)
		goto err;
	rc = devm_add_action(dev, release_nfit_res, nfit_res);
	if (rc)
		goto err;
	INIT_LIST_HEAD(&nfit_res->list);
	memset(buf, 0, size);
	nfit_res->dev = dev;
	nfit_res->buf = buf;
	nfit_res->res.start = *dma;
	nfit_res->res.end = *dma + size - 1;
	nfit_res->res.name = "NFIT";
	spin_lock_init(&nfit_res->lock);
	INIT_LIST_HEAD(&nfit_res->requests);
	spin_lock(&nfit_test_lock);
	list_add(&nfit_res->list, &t->resources);
	spin_unlock(&nfit_test_lock);

	return nfit_res->buf;
err:
	/* only DIMM-sized (and larger) addresses came from the gen_pool */
	if (*dma && size >= DIMM_SIZE)
		gen_pool_free(nfit_pool, *dma, size);
	if (buf)
		vfree(buf);
	kfree(nfit_res);
	return NULL;
}
  1431. static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
  1432. {
  1433. struct genpool_data_align data = {
  1434. .align = SZ_128M,
  1435. };
  1436. void *buf = vmalloc(size);
  1437. if (size >= DIMM_SIZE)
  1438. *dma = gen_pool_alloc_algo(nfit_pool, size,
  1439. gen_pool_first_fit_align, &data);
  1440. else
  1441. *dma = (unsigned long) buf;
  1442. return __test_alloc(t, size, dma, buf);
  1443. }
  1444. static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
  1445. {
  1446. int i;
  1447. for (i = 0; i < ARRAY_SIZE(instances); i++) {
  1448. struct nfit_test_resource *n, *nfit_res = NULL;
  1449. struct nfit_test *t = instances[i];
  1450. if (!t)
  1451. continue;
  1452. spin_lock(&nfit_test_lock);
  1453. list_for_each_entry(n, &t->resources, list) {
  1454. if (addr >= n->res.start && (addr < n->res.start
  1455. + resource_size(&n->res))) {
  1456. nfit_res = n;
  1457. break;
  1458. } else if (addr >= (unsigned long) n->buf
  1459. && (addr < (unsigned long) n->buf
  1460. + resource_size(&n->res))) {
  1461. nfit_res = n;
  1462. break;
  1463. }
  1464. }
  1465. spin_unlock(&nfit_test_lock);
  1466. if (nfit_res)
  1467. return nfit_res;
  1468. }
  1469. return NULL;
  1470. }
  1471. static int ars_state_init(struct device *dev, struct ars_state *ars_state)
  1472. {
  1473. /* for testing, only store up to n records that fit within 4k */
  1474. ars_state->ars_status = devm_kzalloc(dev,
  1475. sizeof(struct nd_cmd_ars_status) + SZ_4K, GFP_KERNEL);
  1476. if (!ars_state->ars_status)
  1477. return -ENOMEM;
  1478. spin_lock_init(&ars_state->lock);
  1479. return 0;
  1480. }
  1481. static void put_dimms(void *data)
  1482. {
  1483. struct nfit_test *t = data;
  1484. int i;
  1485. for (i = 0; i < t->num_dcr; i++)
  1486. if (t->dimm_dev[i])
  1487. device_unregister(t->dimm_dev[i]);
  1488. }
/* device class under which the fake "test_dimm%d" devices are created */
static struct class *nfit_test_dimm;
  1490. static int dimm_name_to_id(struct device *dev)
  1491. {
  1492. int dimm;
  1493. if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1)
  1494. return -ENXIO;
  1495. return dimm;
  1496. }
  1497. static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
  1498. char *buf)
  1499. {
  1500. int dimm = dimm_name_to_id(dev);
  1501. if (dimm < 0)
  1502. return dimm;
  1503. return sprintf(buf, "%#x\n", handle[dimm]);
  1504. }
  1505. DEVICE_ATTR_RO(handle);
  1506. static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
  1507. char *buf)
  1508. {
  1509. int dimm = dimm_name_to_id(dev);
  1510. if (dimm < 0)
  1511. return dimm;
  1512. return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
  1513. }
  1514. static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
  1515. const char *buf, size_t size)
  1516. {
  1517. int dimm = dimm_name_to_id(dev);
  1518. unsigned long val;
  1519. ssize_t rc;
  1520. if (dimm < 0)
  1521. return dimm;
  1522. rc = kstrtol(buf, 0, &val);
  1523. if (rc)
  1524. return rc;
  1525. dimm_fail_cmd_flags[dimm] = val;
  1526. return size;
  1527. }
  1528. static DEVICE_ATTR_RW(fail_cmd);
  1529. static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
  1530. char *buf)
  1531. {
  1532. int dimm = dimm_name_to_id(dev);
  1533. if (dimm < 0)
  1534. return dimm;
  1535. return sprintf(buf, "%d\n", dimm_fail_cmd_code[dimm]);
  1536. }
  1537. static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
  1538. const char *buf, size_t size)
  1539. {
  1540. int dimm = dimm_name_to_id(dev);
  1541. unsigned long val;
  1542. ssize_t rc;
  1543. if (dimm < 0)
  1544. return dimm;
  1545. rc = kstrtol(buf, 0, &val);
  1546. if (rc)
  1547. return rc;
  1548. dimm_fail_cmd_code[dimm] = val;
  1549. return size;
  1550. }
  1551. static DEVICE_ATTR_RW(fail_cmd_code);
  1552. static ssize_t lock_dimm_store(struct device *dev,
  1553. struct device_attribute *attr, const char *buf, size_t size)
  1554. {
  1555. int dimm = dimm_name_to_id(dev);
  1556. struct nfit_test_sec *sec = &dimm_sec_info[dimm];
  1557. sec->state = ND_INTEL_SEC_STATE_ENABLED | ND_INTEL_SEC_STATE_LOCKED;
  1558. return size;
  1559. }
  1560. static DEVICE_ATTR_WO(lock_dimm);
/* per-dimm sysfs attributes exposed by each fake test_dimm device */
static struct attribute *nfit_test_dimm_attributes[] = {
	&dev_attr_fail_cmd.attr,
	&dev_attr_fail_cmd_code.attr,
	&dev_attr_handle.attr,
	&dev_attr_lock_dimm.attr,
	NULL,
};
static struct attribute_group nfit_test_dimm_attribute_group = {
	.attrs = nfit_test_dimm_attributes,
};
/* NULL-terminated group list passed to device_create_with_groups() */
static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
	&nfit_test_dimm_attribute_group,
	NULL,
};
  1575. static int nfit_test_dimm_init(struct nfit_test *t)
  1576. {
  1577. int i;
  1578. if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t))
  1579. return -ENOMEM;
  1580. for (i = 0; i < t->num_dcr; i++) {
  1581. t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
  1582. &t->pdev.dev, 0, NULL,
  1583. nfit_test_dimm_attribute_groups,
  1584. "test_dimm%d", i + t->dcr_idx);
  1585. if (!t->dimm_dev[i])
  1586. return -ENOMEM;
  1587. }
  1588. return 0;
  1589. }
  1590. static void nfit_security_init(struct nfit_test *t)
  1591. {
  1592. int i;
  1593. for (i = 0; i < t->num_dcr; i++) {
  1594. struct nfit_test_sec *sec = &dimm_sec_info[i];
  1595. sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
  1596. }
  1597. }
  1598. static void smart_init(struct nfit_test *t)
  1599. {
  1600. int i;
  1601. const struct nd_intel_smart_threshold smart_t_data = {
  1602. .alarm_control = ND_INTEL_SMART_SPARE_TRIP
  1603. | ND_INTEL_SMART_TEMP_TRIP,
  1604. .media_temperature = 40 * 16,
  1605. .ctrl_temperature = 30 * 16,
  1606. .spares = 5,
  1607. };
  1608. for (i = 0; i < t->num_dcr; i++) {
  1609. memcpy(&t->smart[i], &smart_def, sizeof(smart_def));
  1610. memcpy(&t->smart_threshold[i], &smart_t_data,
  1611. sizeof(smart_t_data));
  1612. }
  1613. }
/*
 * Size of the SPA structures this test emits: the full
 * acpi_nfit_system_address minus its trailing 8 bytes (presumably the
 * location-cookie field, which this harness does not emulate).
 */
static size_t sizeof_spa(struct acpi_nfit_system_address *spa)
{
	/* until spa location cookie support is added... */
	return sizeof(*spa) - 8;
}
/*
 * Allocate all backing store for test instance 0: the NFIT table buffer,
 * the SPA ranges, and per-dimm dimm/label/flush/control-region space,
 * then set up dimm devices, SMART, security, and ARS state.
 *
 * Returns 0 or -ENOMEM.  Allocation order matters: test_alloc() hands
 * out gen_pool addresses sequentially, so the code below is left in its
 * exact original sequence.  Partial allocations are cleaned up by the
 * devm actions that test_alloc()/__test_alloc() register.
 */
static int nfit_test0_alloc(struct nfit_test *t)
{
	struct acpi_nfit_system_address *spa = NULL;
	/* worst-case NFIT size for everything nfit_test0_setup() emits */
	size_t nfit_size = sizeof_spa(spa) * NUM_SPA
		+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
		+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
		+ offsetof(struct acpi_nfit_control_region,
			window_size) * NUM_DCR
		+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
		+ (sizeof(struct acpi_nfit_flush_address)
			+ sizeof(u64) * NUM_HINTS) * NUM_DCR
		+ sizeof(struct acpi_nfit_capabilities);
	int i;
	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;
	/* spa_set[0]/[1] back the interleaved PM ranges, [2] the hotplug one */
	t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;
	t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;
	t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
	if (!t->spa_set[2])
		return -ENOMEM;
	/* per-dimm storage, label space, and flush-hint pages */
	for (i = 0; i < t->num_dcr; i++) {
		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
		if (!t->dimm[i])
			return -ENOMEM;
		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);
		t->flush[i] = test_alloc(t, max(PAGE_SIZE,
					sizeof(u64) * NUM_HINTS),
				&t->flush_dma[i]);
		if (!t->flush[i])
			return -ENOMEM;
	}
	/* block-window control regions */
	for (i = 0; i < t->num_dcr; i++) {
		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
		if (!t->dcr[i])
			return -ENOMEM;
	}
	/* scratch slot used for dynamic _FIT payload updates */
	t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
	if (!t->_fit)
		return -ENOMEM;
	if (nfit_test_dimm_init(t))
		return -ENOMEM;
	smart_init(t);
	nfit_security_init(t);
	return ars_state_init(&t->pdev.dev, &t->ars_state);
}
/*
 * Allocate backing store for the smaller test instance 1: a two-range
 * NFIT buffer, one PM SPA range, per-dimm labels, and a virtual-CD
 * range, then initialize dimm devices, SMART, and ARS state.
 *
 * Returns 0 or -ENOMEM; sequence preserved because test_alloc() assigns
 * gen_pool addresses in allocation order.
 */
static int nfit_test1_alloc(struct nfit_test *t)
{
	struct acpi_nfit_system_address *spa = NULL;
	/* worst-case NFIT size for everything nfit_test1_setup() emits */
	size_t nfit_size = sizeof_spa(spa) * 2
		+ sizeof(struct acpi_nfit_memory_map) * 2
		+ offsetof(struct acpi_nfit_control_region, window_size) * 2;
	int i;
	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;
	t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;
	for (i = 0; i < t->num_dcr; i++) {
		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);
	}
	/* spa_set[1] backs the virtual-CD region */
	t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;
	if (nfit_test_dimm_init(t))
		return -ENOMEM;
	smart_init(t);
	return ars_state_init(&t->pdev.dev, &t->ars_state);
}
  1701. static void dcr_common_init(struct acpi_nfit_control_region *dcr)
  1702. {
  1703. dcr->vendor_id = 0xabcd;
  1704. dcr->device_id = 0;
  1705. dcr->revision_id = 1;
  1706. dcr->valid_fields = 1;
  1707. dcr->manufacturing_location = 0xa;
  1708. dcr->manufacturing_date = cpu_to_be16(2016);
  1709. }
  1710. static void nfit_test0_setup(struct nfit_test *t)
  1711. {
  1712. const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
  1713. + (sizeof(u64) * NUM_HINTS);
  1714. struct acpi_nfit_desc *acpi_desc;
  1715. struct acpi_nfit_memory_map *memdev;
  1716. void *nfit_buf = t->nfit_buf;
  1717. struct acpi_nfit_system_address *spa;
  1718. struct acpi_nfit_control_region *dcr;
  1719. struct acpi_nfit_data_region *bdw;
  1720. struct acpi_nfit_flush_address *flush;
  1721. struct acpi_nfit_capabilities *pcap;
  1722. unsigned int offset = 0, i;
  1723. unsigned long *acpi_mask;
  1724. /*
  1725. * spa0 (interleave first half of dimm0 and dimm1, note storage
  1726. * does not actually alias the related block-data-window
  1727. * regions)
  1728. */
  1729. spa = nfit_buf;
  1730. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1731. spa->header.length = sizeof_spa(spa);
  1732. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  1733. spa->range_index = 0+1;
  1734. spa->address = t->spa_set_dma[0];
  1735. spa->length = SPA0_SIZE;
  1736. offset += spa->header.length;
  1737. /*
  1738. * spa1 (interleave last half of the 4 DIMMS, note storage
  1739. * does not actually alias the related block-data-window
  1740. * regions)
  1741. */
  1742. spa = nfit_buf + offset;
  1743. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1744. spa->header.length = sizeof_spa(spa);
  1745. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  1746. spa->range_index = 1+1;
  1747. spa->address = t->spa_set_dma[1];
  1748. spa->length = SPA1_SIZE;
  1749. offset += spa->header.length;
  1750. /* spa2 (dcr0) dimm0 */
  1751. spa = nfit_buf + offset;
  1752. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1753. spa->header.length = sizeof_spa(spa);
  1754. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  1755. spa->range_index = 2+1;
  1756. spa->address = t->dcr_dma[0];
  1757. spa->length = DCR_SIZE;
  1758. offset += spa->header.length;
  1759. /* spa3 (dcr1) dimm1 */
  1760. spa = nfit_buf + offset;
  1761. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1762. spa->header.length = sizeof_spa(spa);
  1763. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  1764. spa->range_index = 3+1;
  1765. spa->address = t->dcr_dma[1];
  1766. spa->length = DCR_SIZE;
  1767. offset += spa->header.length;
  1768. /* spa4 (dcr2) dimm2 */
  1769. spa = nfit_buf + offset;
  1770. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1771. spa->header.length = sizeof_spa(spa);
  1772. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  1773. spa->range_index = 4+1;
  1774. spa->address = t->dcr_dma[2];
  1775. spa->length = DCR_SIZE;
  1776. offset += spa->header.length;
  1777. /* spa5 (dcr3) dimm3 */
  1778. spa = nfit_buf + offset;
  1779. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1780. spa->header.length = sizeof_spa(spa);
  1781. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  1782. spa->range_index = 5+1;
  1783. spa->address = t->dcr_dma[3];
  1784. spa->length = DCR_SIZE;
  1785. offset += spa->header.length;
  1786. /* spa6 (bdw for dcr0) dimm0 */
  1787. spa = nfit_buf + offset;
  1788. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1789. spa->header.length = sizeof_spa(spa);
  1790. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  1791. spa->range_index = 6+1;
  1792. spa->address = t->dimm_dma[0];
  1793. spa->length = DIMM_SIZE;
  1794. offset += spa->header.length;
  1795. /* spa7 (bdw for dcr1) dimm1 */
  1796. spa = nfit_buf + offset;
  1797. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1798. spa->header.length = sizeof_spa(spa);
  1799. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  1800. spa->range_index = 7+1;
  1801. spa->address = t->dimm_dma[1];
  1802. spa->length = DIMM_SIZE;
  1803. offset += spa->header.length;
  1804. /* spa8 (bdw for dcr2) dimm2 */
  1805. spa = nfit_buf + offset;
  1806. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1807. spa->header.length = sizeof_spa(spa);
  1808. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  1809. spa->range_index = 8+1;
  1810. spa->address = t->dimm_dma[2];
  1811. spa->length = DIMM_SIZE;
  1812. offset += spa->header.length;
  1813. /* spa9 (bdw for dcr3) dimm3 */
  1814. spa = nfit_buf + offset;
  1815. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1816. spa->header.length = sizeof_spa(spa);
  1817. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  1818. spa->range_index = 9+1;
  1819. spa->address = t->dimm_dma[3];
  1820. spa->length = DIMM_SIZE;
  1821. offset += spa->header.length;
  1822. /* mem-region0 (spa0, dimm0) */
  1823. memdev = nfit_buf + offset;
  1824. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1825. memdev->header.length = sizeof(*memdev);
  1826. memdev->device_handle = handle[0];
  1827. memdev->physical_id = 0;
  1828. memdev->region_id = 0;
  1829. memdev->range_index = 0+1;
  1830. memdev->region_index = 4+1;
  1831. memdev->region_size = SPA0_SIZE/2;
  1832. memdev->region_offset = 1;
  1833. memdev->address = 0;
  1834. memdev->interleave_index = 0;
  1835. memdev->interleave_ways = 2;
  1836. offset += memdev->header.length;
  1837. /* mem-region1 (spa0, dimm1) */
  1838. memdev = nfit_buf + offset;
  1839. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1840. memdev->header.length = sizeof(*memdev);
  1841. memdev->device_handle = handle[1];
  1842. memdev->physical_id = 1;
  1843. memdev->region_id = 0;
  1844. memdev->range_index = 0+1;
  1845. memdev->region_index = 5+1;
  1846. memdev->region_size = SPA0_SIZE/2;
  1847. memdev->region_offset = (1 << 8);
  1848. memdev->address = 0;
  1849. memdev->interleave_index = 0;
  1850. memdev->interleave_ways = 2;
  1851. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  1852. offset += memdev->header.length;
  1853. /* mem-region2 (spa1, dimm0) */
  1854. memdev = nfit_buf + offset;
  1855. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1856. memdev->header.length = sizeof(*memdev);
  1857. memdev->device_handle = handle[0];
  1858. memdev->physical_id = 0;
  1859. memdev->region_id = 1;
  1860. memdev->range_index = 1+1;
  1861. memdev->region_index = 4+1;
  1862. memdev->region_size = SPA1_SIZE/4;
  1863. memdev->region_offset = (1 << 16);
  1864. memdev->address = SPA0_SIZE/2;
  1865. memdev->interleave_index = 0;
  1866. memdev->interleave_ways = 4;
  1867. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  1868. offset += memdev->header.length;
  1869. /* mem-region3 (spa1, dimm1) */
  1870. memdev = nfit_buf + offset;
  1871. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1872. memdev->header.length = sizeof(*memdev);
  1873. memdev->device_handle = handle[1];
  1874. memdev->physical_id = 1;
  1875. memdev->region_id = 1;
  1876. memdev->range_index = 1+1;
  1877. memdev->region_index = 5+1;
  1878. memdev->region_size = SPA1_SIZE/4;
  1879. memdev->region_offset = (1 << 24);
  1880. memdev->address = SPA0_SIZE/2;
  1881. memdev->interleave_index = 0;
  1882. memdev->interleave_ways = 4;
  1883. offset += memdev->header.length;
  1884. /* mem-region4 (spa1, dimm2) */
  1885. memdev = nfit_buf + offset;
  1886. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1887. memdev->header.length = sizeof(*memdev);
  1888. memdev->device_handle = handle[2];
  1889. memdev->physical_id = 2;
  1890. memdev->region_id = 0;
  1891. memdev->range_index = 1+1;
  1892. memdev->region_index = 6+1;
  1893. memdev->region_size = SPA1_SIZE/4;
  1894. memdev->region_offset = (1ULL << 32);
  1895. memdev->address = SPA0_SIZE/2;
  1896. memdev->interleave_index = 0;
  1897. memdev->interleave_ways = 4;
  1898. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  1899. offset += memdev->header.length;
  1900. /* mem-region5 (spa1, dimm3) */
  1901. memdev = nfit_buf + offset;
  1902. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1903. memdev->header.length = sizeof(*memdev);
  1904. memdev->device_handle = handle[3];
  1905. memdev->physical_id = 3;
  1906. memdev->region_id = 0;
  1907. memdev->range_index = 1+1;
  1908. memdev->region_index = 7+1;
  1909. memdev->region_size = SPA1_SIZE/4;
  1910. memdev->region_offset = (1ULL << 40);
  1911. memdev->address = SPA0_SIZE/2;
  1912. memdev->interleave_index = 0;
  1913. memdev->interleave_ways = 4;
  1914. offset += memdev->header.length;
  1915. /* mem-region6 (spa/dcr0, dimm0) */
  1916. memdev = nfit_buf + offset;
  1917. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1918. memdev->header.length = sizeof(*memdev);
  1919. memdev->device_handle = handle[0];
  1920. memdev->physical_id = 0;
  1921. memdev->region_id = 0;
  1922. memdev->range_index = 2+1;
  1923. memdev->region_index = 0+1;
  1924. memdev->region_size = 0;
  1925. memdev->region_offset = 0;
  1926. memdev->address = 0;
  1927. memdev->interleave_index = 0;
  1928. memdev->interleave_ways = 1;
  1929. offset += memdev->header.length;
  1930. /* mem-region7 (spa/dcr1, dimm1) */
  1931. memdev = nfit_buf + offset;
  1932. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1933. memdev->header.length = sizeof(*memdev);
  1934. memdev->device_handle = handle[1];
  1935. memdev->physical_id = 1;
  1936. memdev->region_id = 0;
  1937. memdev->range_index = 3+1;
  1938. memdev->region_index = 1+1;
  1939. memdev->region_size = 0;
  1940. memdev->region_offset = 0;
  1941. memdev->address = 0;
  1942. memdev->interleave_index = 0;
  1943. memdev->interleave_ways = 1;
  1944. offset += memdev->header.length;
  1945. /* mem-region8 (spa/dcr2, dimm2) */
  1946. memdev = nfit_buf + offset;
  1947. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1948. memdev->header.length = sizeof(*memdev);
  1949. memdev->device_handle = handle[2];
  1950. memdev->physical_id = 2;
  1951. memdev->region_id = 0;
  1952. memdev->range_index = 4+1;
  1953. memdev->region_index = 2+1;
  1954. memdev->region_size = 0;
  1955. memdev->region_offset = 0;
  1956. memdev->address = 0;
  1957. memdev->interleave_index = 0;
  1958. memdev->interleave_ways = 1;
  1959. offset += memdev->header.length;
  1960. /* mem-region9 (spa/dcr3, dimm3) */
  1961. memdev = nfit_buf + offset;
  1962. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1963. memdev->header.length = sizeof(*memdev);
  1964. memdev->device_handle = handle[3];
  1965. memdev->physical_id = 3;
  1966. memdev->region_id = 0;
  1967. memdev->range_index = 5+1;
  1968. memdev->region_index = 3+1;
  1969. memdev->region_size = 0;
  1970. memdev->region_offset = 0;
  1971. memdev->address = 0;
  1972. memdev->interleave_index = 0;
  1973. memdev->interleave_ways = 1;
  1974. offset += memdev->header.length;
  1975. /* mem-region10 (spa/bdw0, dimm0) */
  1976. memdev = nfit_buf + offset;
  1977. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1978. memdev->header.length = sizeof(*memdev);
  1979. memdev->device_handle = handle[0];
  1980. memdev->physical_id = 0;
  1981. memdev->region_id = 0;
  1982. memdev->range_index = 6+1;
  1983. memdev->region_index = 0+1;
  1984. memdev->region_size = 0;
  1985. memdev->region_offset = 0;
  1986. memdev->address = 0;
  1987. memdev->interleave_index = 0;
  1988. memdev->interleave_ways = 1;
  1989. offset += memdev->header.length;
  1990. /* mem-region11 (spa/bdw1, dimm1) */
  1991. memdev = nfit_buf + offset;
  1992. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1993. memdev->header.length = sizeof(*memdev);
  1994. memdev->device_handle = handle[1];
  1995. memdev->physical_id = 1;
  1996. memdev->region_id = 0;
  1997. memdev->range_index = 7+1;
  1998. memdev->region_index = 1+1;
  1999. memdev->region_size = 0;
  2000. memdev->region_offset = 0;
  2001. memdev->address = 0;
  2002. memdev->interleave_index = 0;
  2003. memdev->interleave_ways = 1;
  2004. offset += memdev->header.length;
  2005. /* mem-region12 (spa/bdw2, dimm2) */
  2006. memdev = nfit_buf + offset;
  2007. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  2008. memdev->header.length = sizeof(*memdev);
  2009. memdev->device_handle = handle[2];
  2010. memdev->physical_id = 2;
  2011. memdev->region_id = 0;
  2012. memdev->range_index = 8+1;
  2013. memdev->region_index = 2+1;
  2014. memdev->region_size = 0;
  2015. memdev->region_offset = 0;
  2016. memdev->address = 0;
  2017. memdev->interleave_index = 0;
  2018. memdev->interleave_ways = 1;
  2019. offset += memdev->header.length;
  2020. /* mem-region13 (spa/dcr3, dimm3) */
  2021. memdev = nfit_buf + offset;
  2022. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  2023. memdev->header.length = sizeof(*memdev);
  2024. memdev->device_handle = handle[3];
  2025. memdev->physical_id = 3;
  2026. memdev->region_id = 0;
  2027. memdev->range_index = 9+1;
  2028. memdev->region_index = 3+1;
  2029. memdev->region_size = 0;
  2030. memdev->region_offset = 0;
  2031. memdev->address = 0;
  2032. memdev->interleave_index = 0;
  2033. memdev->interleave_ways = 1;
  2034. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  2035. offset += memdev->header.length;
  2036. /* dcr-descriptor0: blk */
  2037. dcr = nfit_buf + offset;
  2038. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  2039. dcr->header.length = sizeof(*dcr);
  2040. dcr->region_index = 0+1;
  2041. dcr_common_init(dcr);
  2042. dcr->serial_number = ~handle[0];
  2043. dcr->code = NFIT_FIC_BLK;
  2044. dcr->windows = 1;
  2045. dcr->window_size = DCR_SIZE;
  2046. dcr->command_offset = 0;
  2047. dcr->command_size = 8;
  2048. dcr->status_offset = 8;
  2049. dcr->status_size = 4;
  2050. offset += dcr->header.length;
  2051. /* dcr-descriptor1: blk */
  2052. dcr = nfit_buf + offset;
  2053. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  2054. dcr->header.length = sizeof(*dcr);
  2055. dcr->region_index = 1+1;
  2056. dcr_common_init(dcr);
  2057. dcr->serial_number = ~handle[1];
  2058. dcr->code = NFIT_FIC_BLK;
  2059. dcr->windows = 1;
  2060. dcr->window_size = DCR_SIZE;
  2061. dcr->command_offset = 0;
  2062. dcr->command_size = 8;
  2063. dcr->status_offset = 8;
  2064. dcr->status_size = 4;
  2065. offset += dcr->header.length;
  2066. /* dcr-descriptor2: blk */
  2067. dcr = nfit_buf + offset;
  2068. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  2069. dcr->header.length = sizeof(*dcr);
  2070. dcr->region_index = 2+1;
  2071. dcr_common_init(dcr);
  2072. dcr->serial_number = ~handle[2];
  2073. dcr->code = NFIT_FIC_BLK;
  2074. dcr->windows = 1;
  2075. dcr->window_size = DCR_SIZE;
  2076. dcr->command_offset = 0;
  2077. dcr->command_size = 8;
  2078. dcr->status_offset = 8;
  2079. dcr->status_size = 4;
  2080. offset += dcr->header.length;
  2081. /* dcr-descriptor3: blk */
  2082. dcr = nfit_buf + offset;
  2083. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  2084. dcr->header.length = sizeof(*dcr);
  2085. dcr->region_index = 3+1;
  2086. dcr_common_init(dcr);
  2087. dcr->serial_number = ~handle[3];
  2088. dcr->code = NFIT_FIC_BLK;
  2089. dcr->windows = 1;
  2090. dcr->window_size = DCR_SIZE;
  2091. dcr->command_offset = 0;
  2092. dcr->command_size = 8;
  2093. dcr->status_offset = 8;
  2094. dcr->status_size = 4;
  2095. offset += dcr->header.length;
  2096. /* dcr-descriptor0: pmem */
  2097. dcr = nfit_buf + offset;
  2098. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  2099. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  2100. window_size);
  2101. dcr->region_index = 4+1;
  2102. dcr_common_init(dcr);
  2103. dcr->serial_number = ~handle[0];
  2104. dcr->code = NFIT_FIC_BYTEN;
  2105. dcr->windows = 0;
  2106. offset += dcr->header.length;
  2107. /* dcr-descriptor1: pmem */
  2108. dcr = nfit_buf + offset;
  2109. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  2110. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  2111. window_size);
  2112. dcr->region_index = 5+1;
  2113. dcr_common_init(dcr);
  2114. dcr->serial_number = ~handle[1];
  2115. dcr->code = NFIT_FIC_BYTEN;
  2116. dcr->windows = 0;
  2117. offset += dcr->header.length;
  2118. /* dcr-descriptor2: pmem */
  2119. dcr = nfit_buf + offset;
  2120. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  2121. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  2122. window_size);
  2123. dcr->region_index = 6+1;
  2124. dcr_common_init(dcr);
  2125. dcr->serial_number = ~handle[2];
  2126. dcr->code = NFIT_FIC_BYTEN;
  2127. dcr->windows = 0;
  2128. offset += dcr->header.length;
  2129. /* dcr-descriptor3: pmem */
  2130. dcr = nfit_buf + offset;
  2131. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  2132. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  2133. window_size);
  2134. dcr->region_index = 7+1;
  2135. dcr_common_init(dcr);
  2136. dcr->serial_number = ~handle[3];
  2137. dcr->code = NFIT_FIC_BYTEN;
  2138. dcr->windows = 0;
  2139. offset += dcr->header.length;
  2140. /* bdw0 (spa/dcr0, dimm0) */
  2141. bdw = nfit_buf + offset;
  2142. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  2143. bdw->header.length = sizeof(*bdw);
  2144. bdw->region_index = 0+1;
  2145. bdw->windows = 1;
  2146. bdw->offset = 0;
  2147. bdw->size = BDW_SIZE;
  2148. bdw->capacity = DIMM_SIZE;
  2149. bdw->start_address = 0;
  2150. offset += bdw->header.length;
  2151. /* bdw1 (spa/dcr1, dimm1) */
  2152. bdw = nfit_buf + offset;
  2153. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  2154. bdw->header.length = sizeof(*bdw);
  2155. bdw->region_index = 1+1;
  2156. bdw->windows = 1;
  2157. bdw->offset = 0;
  2158. bdw->size = BDW_SIZE;
  2159. bdw->capacity = DIMM_SIZE;
  2160. bdw->start_address = 0;
  2161. offset += bdw->header.length;
  2162. /* bdw2 (spa/dcr2, dimm2) */
  2163. bdw = nfit_buf + offset;
  2164. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  2165. bdw->header.length = sizeof(*bdw);
  2166. bdw->region_index = 2+1;
  2167. bdw->windows = 1;
  2168. bdw->offset = 0;
  2169. bdw->size = BDW_SIZE;
  2170. bdw->capacity = DIMM_SIZE;
  2171. bdw->start_address = 0;
  2172. offset += bdw->header.length;
  2173. /* bdw3 (spa/dcr3, dimm3) */
  2174. bdw = nfit_buf + offset;
  2175. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  2176. bdw->header.length = sizeof(*bdw);
  2177. bdw->region_index = 3+1;
  2178. bdw->windows = 1;
  2179. bdw->offset = 0;
  2180. bdw->size = BDW_SIZE;
  2181. bdw->capacity = DIMM_SIZE;
  2182. bdw->start_address = 0;
  2183. offset += bdw->header.length;
  2184. /* flush0 (dimm0) */
  2185. flush = nfit_buf + offset;
  2186. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  2187. flush->header.length = flush_hint_size;
  2188. flush->device_handle = handle[0];
  2189. flush->hint_count = NUM_HINTS;
  2190. for (i = 0; i < NUM_HINTS; i++)
  2191. flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
  2192. offset += flush->header.length;
  2193. /* flush1 (dimm1) */
  2194. flush = nfit_buf + offset;
  2195. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  2196. flush->header.length = flush_hint_size;
  2197. flush->device_handle = handle[1];
  2198. flush->hint_count = NUM_HINTS;
  2199. for (i = 0; i < NUM_HINTS; i++)
  2200. flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
  2201. offset += flush->header.length;
  2202. /* flush2 (dimm2) */
  2203. flush = nfit_buf + offset;
  2204. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  2205. flush->header.length = flush_hint_size;
  2206. flush->device_handle = handle[2];
  2207. flush->hint_count = NUM_HINTS;
  2208. for (i = 0; i < NUM_HINTS; i++)
  2209. flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
  2210. offset += flush->header.length;
  2211. /* flush3 (dimm3) */
  2212. flush = nfit_buf + offset;
  2213. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  2214. flush->header.length = flush_hint_size;
  2215. flush->device_handle = handle[3];
  2216. flush->hint_count = NUM_HINTS;
  2217. for (i = 0; i < NUM_HINTS; i++)
  2218. flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
  2219. offset += flush->header.length;
  2220. /* platform capabilities */
  2221. pcap = nfit_buf + offset;
  2222. pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
  2223. pcap->header.length = sizeof(*pcap);
  2224. pcap->highest_capability = 1;
  2225. pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH;
  2226. offset += pcap->header.length;
  2227. if (t->setup_hotplug) {
  2228. /* dcr-descriptor4: blk */
  2229. dcr = nfit_buf + offset;
  2230. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  2231. dcr->header.length = sizeof(*dcr);
  2232. dcr->region_index = 8+1;
  2233. dcr_common_init(dcr);
  2234. dcr->serial_number = ~handle[4];
  2235. dcr->code = NFIT_FIC_BLK;
  2236. dcr->windows = 1;
  2237. dcr->window_size = DCR_SIZE;
  2238. dcr->command_offset = 0;
  2239. dcr->command_size = 8;
  2240. dcr->status_offset = 8;
  2241. dcr->status_size = 4;
  2242. offset += dcr->header.length;
  2243. /* dcr-descriptor4: pmem */
  2244. dcr = nfit_buf + offset;
  2245. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  2246. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  2247. window_size);
  2248. dcr->region_index = 9+1;
  2249. dcr_common_init(dcr);
  2250. dcr->serial_number = ~handle[4];
  2251. dcr->code = NFIT_FIC_BYTEN;
  2252. dcr->windows = 0;
  2253. offset += dcr->header.length;
  2254. /* bdw4 (spa/dcr4, dimm4) */
  2255. bdw = nfit_buf + offset;
  2256. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  2257. bdw->header.length = sizeof(*bdw);
  2258. bdw->region_index = 8+1;
  2259. bdw->windows = 1;
  2260. bdw->offset = 0;
  2261. bdw->size = BDW_SIZE;
  2262. bdw->capacity = DIMM_SIZE;
  2263. bdw->start_address = 0;
  2264. offset += bdw->header.length;
  2265. /* spa10 (dcr4) dimm4 */
  2266. spa = nfit_buf + offset;
  2267. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  2268. spa->header.length = sizeof_spa(spa);
  2269. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  2270. spa->range_index = 10+1;
  2271. spa->address = t->dcr_dma[4];
  2272. spa->length = DCR_SIZE;
  2273. offset += spa->header.length;
  2274. /*
  2275. * spa11 (single-dimm interleave for hotplug, note storage
  2276. * does not actually alias the related block-data-window
  2277. * regions)
  2278. */
  2279. spa = nfit_buf + offset;
  2280. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  2281. spa->header.length = sizeof_spa(spa);
  2282. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  2283. spa->range_index = 11+1;
  2284. spa->address = t->spa_set_dma[2];
  2285. spa->length = SPA0_SIZE;
  2286. offset += spa->header.length;
  2287. /* spa12 (bdw for dcr4) dimm4 */
  2288. spa = nfit_buf + offset;
  2289. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  2290. spa->header.length = sizeof_spa(spa);
  2291. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  2292. spa->range_index = 12+1;
  2293. spa->address = t->dimm_dma[4];
  2294. spa->length = DIMM_SIZE;
  2295. offset += spa->header.length;
  2296. /* mem-region14 (spa/dcr4, dimm4) */
  2297. memdev = nfit_buf + offset;
  2298. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  2299. memdev->header.length = sizeof(*memdev);
  2300. memdev->device_handle = handle[4];
  2301. memdev->physical_id = 4;
  2302. memdev->region_id = 0;
  2303. memdev->range_index = 10+1;
  2304. memdev->region_index = 8+1;
  2305. memdev->region_size = 0;
  2306. memdev->region_offset = 0;
  2307. memdev->address = 0;
  2308. memdev->interleave_index = 0;
  2309. memdev->interleave_ways = 1;
  2310. offset += memdev->header.length;
  2311. /* mem-region15 (spa11, dimm4) */
  2312. memdev = nfit_buf + offset;
  2313. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  2314. memdev->header.length = sizeof(*memdev);
  2315. memdev->device_handle = handle[4];
  2316. memdev->physical_id = 4;
  2317. memdev->region_id = 0;
  2318. memdev->range_index = 11+1;
  2319. memdev->region_index = 9+1;
  2320. memdev->region_size = SPA0_SIZE;
  2321. memdev->region_offset = (1ULL << 48);
  2322. memdev->address = 0;
  2323. memdev->interleave_index = 0;
  2324. memdev->interleave_ways = 1;
  2325. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  2326. offset += memdev->header.length;
  2327. /* mem-region16 (spa/bdw4, dimm4) */
  2328. memdev = nfit_buf + offset;
  2329. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  2330. memdev->header.length = sizeof(*memdev);
  2331. memdev->device_handle = handle[4];
  2332. memdev->physical_id = 4;
  2333. memdev->region_id = 0;
  2334. memdev->range_index = 12+1;
  2335. memdev->region_index = 8+1;
  2336. memdev->region_size = 0;
  2337. memdev->region_offset = 0;
  2338. memdev->address = 0;
  2339. memdev->interleave_index = 0;
  2340. memdev->interleave_ways = 1;
  2341. offset += memdev->header.length;
  2342. /* flush3 (dimm4) */
  2343. flush = nfit_buf + offset;
  2344. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  2345. flush->header.length = flush_hint_size;
  2346. flush->device_handle = handle[4];
  2347. flush->hint_count = NUM_HINTS;
  2348. for (i = 0; i < NUM_HINTS; i++)
  2349. flush->hint_address[i] = t->flush_dma[4]
  2350. + i * sizeof(u64);
  2351. offset += flush->header.length;
  2352. /* sanity check to make sure we've filled the buffer */
  2353. WARN_ON(offset != t->nfit_size);
  2354. }
  2355. t->nfit_filled = offset;
  2356. post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
  2357. SPA0_SIZE);
  2358. acpi_desc = &t->acpi_desc;
  2359. set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
  2360. set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
  2361. set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
  2362. set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en);
  2363. set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
  2364. set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
  2365. set_bit(ND_INTEL_SMART_INJECT, &acpi_desc->dimm_cmd_force_en);
  2366. set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
  2367. set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
  2368. set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
  2369. set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
  2370. set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
  2371. set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_dsm_mask);
  2372. set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_dsm_mask);
  2373. set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_dsm_mask);
  2374. set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_dsm_mask);
  2375. set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en);
  2376. set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en);
  2377. set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en);
  2378. set_bit(ND_INTEL_FW_FINISH_UPDATE, &acpi_desc->dimm_cmd_force_en);
  2379. set_bit(ND_INTEL_FW_FINISH_QUERY, &acpi_desc->dimm_cmd_force_en);
  2380. set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
  2381. set_bit(NVDIMM_INTEL_GET_SECURITY_STATE,
  2382. &acpi_desc->dimm_cmd_force_en);
  2383. set_bit(NVDIMM_INTEL_SET_PASSPHRASE, &acpi_desc->dimm_cmd_force_en);
  2384. set_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE,
  2385. &acpi_desc->dimm_cmd_force_en);
  2386. set_bit(NVDIMM_INTEL_UNLOCK_UNIT, &acpi_desc->dimm_cmd_force_en);
  2387. set_bit(NVDIMM_INTEL_FREEZE_LOCK, &acpi_desc->dimm_cmd_force_en);
  2388. set_bit(NVDIMM_INTEL_SECURE_ERASE, &acpi_desc->dimm_cmd_force_en);
  2389. set_bit(NVDIMM_INTEL_OVERWRITE, &acpi_desc->dimm_cmd_force_en);
  2390. set_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &acpi_desc->dimm_cmd_force_en);
  2391. set_bit(NVDIMM_INTEL_SET_MASTER_PASSPHRASE,
  2392. &acpi_desc->dimm_cmd_force_en);
  2393. set_bit(NVDIMM_INTEL_MASTER_SECURE_ERASE,
  2394. &acpi_desc->dimm_cmd_force_en);
  2395. set_bit(NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO, &acpi_desc->dimm_cmd_force_en);
  2396. set_bit(NVDIMM_INTEL_FW_ACTIVATE_ARM, &acpi_desc->dimm_cmd_force_en);
  2397. acpi_mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
  2398. set_bit(NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO, acpi_mask);
  2399. set_bit(NVDIMM_BUS_INTEL_FW_ACTIVATE, acpi_mask);
  2400. }
/*
 * nfit_test1_setup - build the NFIT for the second test bus.
 *
 * Describes a flat pmem range (no block-data-window aliasing), a
 * virtual CD region, and two DIMMs whose memory-map flags exercise the
 * error paths: dimm "handle[5]" reports save/restore/flush failures
 * plus health-observed and not-armed, dimm "handle[6]" reports
 * map-failed.  Table sub-structures are packed back-to-back into
 * t->nfit_buf; @offset tracks the running length and must end up equal
 * to t->nfit_size.
 */
static void nfit_test1_setup(struct nfit_test *t)
{
	size_t offset;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_desc *acpi_desc;

	offset = 0;
	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof_spa(spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;
	offset += spa->header.length;

	/* virtual cd region */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof_spa(spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
	/* range_index 0: no memdev references this range */
	spa->range_index = 0;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA_VCD_SIZE;
	offset += spa->header.length;

	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[5];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	/* simulate a dimm with a full set of health/save failures */
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_NOT_ARMED;
	offset += memdev->header.length;

	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	/* abbreviated descriptor: fields from window_size onward omitted */
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[5];
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* mem-region1 (no spa, dimm1) - exercises the map-failed path */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[6];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0;
	memdev->region_index = 0+2;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;
	offset += memdev->header.length;

	/* dcr-descriptor1 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+2;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[6];
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* sanity check to make sure we've filled the buffer */
	WARN_ON(offset != t->nfit_size);
	t->nfit_filled = offset;

	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
			SPA2_SIZE);

	acpi_desc = &t->acpi_desc;
	/* force-enable the command set this bus is expected to support */
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
}
  2498. static unsigned long nfit_ctl_handle;
  2499. union acpi_object *result;
  2500. static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
  2501. const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
  2502. {
  2503. if (handle != &nfit_ctl_handle)
  2504. return ERR_PTR(-ENXIO);
  2505. return result;
  2506. }
  2507. static int setup_result(void *buf, size_t size)
  2508. {
  2509. result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
  2510. if (!result)
  2511. return -ENOMEM;
  2512. result->package.type = ACPI_TYPE_BUFFER,
  2513. result->buffer.pointer = (void *) (result + 1);
  2514. result->buffer.length = size;
  2515. memcpy(result->buffer.pointer, buf, size);
  2516. memset(buf, 0, size);
  2517. return 0;
  2518. }
/*
 * nfit_ctl_test - exercise acpi_nfit_ctl() against canned DSM replies.
 *
 * Fakes up an acpi_device / acpi_nfit_desc / nfit_mem / nvdimm on the
 * devm context of @dev, then drives a sequence of commands through
 * acpi_nfit_ctl() with pre-staged ACPI buffer replies (setup_result()),
 * validating both the success paths and the translation of extended
 * status into error codes.  Returns 0 on success, -ENOMEM on
 * allocation failure, -EIO when a check fails.
 */
static int nfit_ctl_test(struct device *dev)
{
	int rc, cmd_rc;
	struct nvdimm *nvdimm;
	struct acpi_device *adev;
	struct nfit_mem *nfit_mem;
	struct nd_ars_record *record;
	struct acpi_nfit_desc *acpi_desc;
	const u64 test_val = 0x0123456789abcdefULL;
	unsigned long mask, cmd_size, offset;
	/* union of every payload exercised below, plus the ND_CMD_CALL pkg */
	struct nfit_ctl_test_cmd {
		struct nd_cmd_pkg pkg;
		union {
			struct nd_cmd_get_config_size cfg_size;
			struct nd_cmd_clear_error clear_err;
			struct nd_cmd_ars_status ars_stat;
			struct nd_cmd_ars_cap ars_cap;
			struct nd_intel_bus_fw_activate_businfo fwa_info;
			char buf[sizeof(struct nd_cmd_ars_status)
				+ sizeof(struct nd_ars_record)];
		};
	} cmd;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	/* handle must be &nfit_ctl_handle for nfit_test_evaluate_dsm() */
	*adev = (struct acpi_device) {
		.handle = &nfit_ctl_handle,
		.dev = {
			.init_name = "test-adev",
		},
	};

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	*acpi_desc = (struct acpi_nfit_desc) {
		.nd_desc = {
			.cmd_mask = 1UL << ND_CMD_ARS_CAP
				| 1UL << ND_CMD_ARS_START
				| 1UL << ND_CMD_ARS_STATUS
				| 1UL << ND_CMD_CLEAR_ERROR
				| 1UL << ND_CMD_CALL,
			.module = THIS_MODULE,
			.provider_name = "ACPI.NFIT",
			.ndctl = acpi_nfit_ctl,
			.bus_family_mask = 1UL << NVDIMM_BUS_FAMILY_NFIT
				| 1UL << NVDIMM_BUS_FAMILY_INTEL,
		},
		.bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
			| 1UL << NFIT_CMD_ARS_INJECT_SET
			| 1UL << NFIT_CMD_ARS_INJECT_CLEAR
			| 1UL << NFIT_CMD_ARS_INJECT_GET,
		.family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL] =
			NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK,
		.dev = &adev->dev,
	};

	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
	if (!nfit_mem)
		return -ENOMEM;

	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
		| 1UL << ND_CMD_VENDOR;
	*nfit_mem = (struct nfit_mem) {
		.adev = adev,
		.family = NVDIMM_FAMILY_INTEL,
		.dsm_mask = mask,
	};

	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
	if (!nvdimm)
		return -ENOMEM;
	*nvdimm = (struct nvdimm) {
		.provider_data = nfit_mem,
		.cmd_mask = mask,
		.dev = {
			.init_name = "test-dimm",
		},
	};

	/* basic checkout of a typical 'get config size' command */
	cmd_size = sizeof(cmd.cfg_size);
	cmd.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 0,
		.config_size = SZ_128K,
		.max_xfer = SZ_4K,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	/* the staged values must round-trip back into cmd.cfg_size */
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc || cmd.cfg_size.status != 0
			|| cmd.cfg_size.config_size != SZ_128K
			|| cmd.cfg_size.max_xfer != SZ_4K) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with zero output */
	cmd_size = offsetof(struct nd_cmd_ars_status, address);
	cmd.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = 0,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_cap with benign extended status */
	cmd_size = sizeof(cmd.ars_cap);
	cmd.ars_cap = (struct nd_cmd_ars_cap) {
		.status = ND_ARS_PERSISTENT << 16,
	};
	/* only stage the reply from 'status' onward */
	offset = offsetof(struct nd_cmd_ars_cap, status);
	rc = setup_result(cmd.buf + offset, cmd_size - offset);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'status' trimmed from 'out_length' */
	cmd_size = sizeof(cmd.ars_stat) + sizeof(struct nd_ars_record);
	cmd.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size - 4,
	};
	record = &cmd.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmd.buf, cmd_size, &cmd_rc);
	/* record data must survive the out_length interpretation */
	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'Output (Size)' including 'status' */
	cmd_size = sizeof(cmd.ars_stat) + sizeof(struct nd_ars_record);
	cmd.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size,
	};
	record = &cmd.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test extended status for get_config_size results in failure */
	cmd_size = sizeof(cmd.cfg_size);
	cmd.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 1 << 16,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmd.buf, cmd_size, &cmd_rc);
	/* here a non-negative cmd_rc means the error was NOT propagated */
	if (rc < 0 || cmd_rc >= 0) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test clear error */
	cmd_size = sizeof(cmd.clear_err);
	cmd.clear_err = (struct nd_cmd_clear_error) {
		.length = 512,
		.cleared = 512,
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
			cmd.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test firmware activate bus info */
	cmd_size = sizeof(cmd.fwa_info);
	cmd = (struct nfit_ctl_test_cmd) {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_out = cmd_size,
			.nd_fw_size = cmd_size,
		},
		.fwa_info = {
			.state = ND_INTEL_FWA_IDLE,
			.capability = ND_INTEL_BUS_FWA_CAP_FWQUIESCE
				| ND_INTEL_BUS_FWA_CAP_OSQUIESCE,
			.activate_tmo = 1,
			.cpu_quiesce_tmo = 1,
			.io_quiesce_tmo = 1,
			.max_quiesce_tmo = 1,
		},
	};
	rc = setup_result(cmd.buf, cmd_size);
	if (rc)
		return rc;
	/* passthrough path: envelope (pkg) plus payload via ND_CMD_CALL */
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CALL,
			&cmd, sizeof(cmd.pkg) + cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	return 0;
}
  2746. static int nfit_test_probe(struct platform_device *pdev)
  2747. {
  2748. struct nvdimm_bus_descriptor *nd_desc;
  2749. struct acpi_nfit_desc *acpi_desc;
  2750. struct device *dev = &pdev->dev;
  2751. struct nfit_test *nfit_test;
  2752. struct nfit_mem *nfit_mem;
  2753. union acpi_object *obj;
  2754. int rc;
  2755. if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
  2756. rc = nfit_ctl_test(&pdev->dev);
  2757. if (rc)
  2758. return rc;
  2759. }
  2760. nfit_test = to_nfit_test(&pdev->dev);
  2761. /* common alloc */
  2762. if (nfit_test->num_dcr) {
  2763. int num = nfit_test->num_dcr;
  2764. nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
  2765. GFP_KERNEL);
  2766. nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
  2767. GFP_KERNEL);
  2768. nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
  2769. GFP_KERNEL);
  2770. nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
  2771. GFP_KERNEL);
  2772. nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
  2773. GFP_KERNEL);
  2774. nfit_test->label_dma = devm_kcalloc(dev, num,
  2775. sizeof(dma_addr_t), GFP_KERNEL);
  2776. nfit_test->dcr = devm_kcalloc(dev, num,
  2777. sizeof(struct nfit_test_dcr *), GFP_KERNEL);
  2778. nfit_test->dcr_dma = devm_kcalloc(dev, num,
  2779. sizeof(dma_addr_t), GFP_KERNEL);
  2780. nfit_test->smart = devm_kcalloc(dev, num,
  2781. sizeof(struct nd_intel_smart), GFP_KERNEL);
  2782. nfit_test->smart_threshold = devm_kcalloc(dev, num,
  2783. sizeof(struct nd_intel_smart_threshold),
  2784. GFP_KERNEL);
  2785. nfit_test->fw = devm_kcalloc(dev, num,
  2786. sizeof(struct nfit_test_fw), GFP_KERNEL);
  2787. if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
  2788. && nfit_test->label_dma && nfit_test->dcr
  2789. && nfit_test->dcr_dma && nfit_test->flush
  2790. && nfit_test->flush_dma
  2791. && nfit_test->fw)
  2792. /* pass */;
  2793. else
  2794. return -ENOMEM;
  2795. }
  2796. if (nfit_test->num_pm) {
  2797. int num = nfit_test->num_pm;
  2798. nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
  2799. GFP_KERNEL);
  2800. nfit_test->spa_set_dma = devm_kcalloc(dev, num,
  2801. sizeof(dma_addr_t), GFP_KERNEL);
  2802. if (nfit_test->spa_set && nfit_test->spa_set_dma)
  2803. /* pass */;
  2804. else
  2805. return -ENOMEM;
  2806. }
  2807. /* per-nfit specific alloc */
  2808. if (nfit_test->alloc(nfit_test))
  2809. return -ENOMEM;
  2810. nfit_test->setup(nfit_test);
  2811. acpi_desc = &nfit_test->acpi_desc;
  2812. acpi_nfit_desc_init(acpi_desc, &pdev->dev);
  2813. nd_desc = &acpi_desc->nd_desc;
  2814. nd_desc->provider_name = NULL;
  2815. nd_desc->module = THIS_MODULE;
  2816. nd_desc->ndctl = nfit_test_ctl;
  2817. rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
  2818. nfit_test->nfit_filled);
  2819. if (rc)
  2820. return rc;
  2821. rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc);
  2822. if (rc)
  2823. return rc;
  2824. if (nfit_test->setup != nfit_test0_setup)
  2825. return 0;
  2826. nfit_test->setup_hotplug = 1;
  2827. nfit_test->setup(nfit_test);
  2828. obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  2829. if (!obj)
  2830. return -ENOMEM;
  2831. obj->type = ACPI_TYPE_BUFFER;
  2832. obj->buffer.length = nfit_test->nfit_size;
  2833. obj->buffer.pointer = nfit_test->nfit_buf;
  2834. *(nfit_test->_fit) = obj;
  2835. __acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);
  2836. /* associate dimm devices with nfit_mem data for notification testing */
  2837. mutex_lock(&acpi_desc->init_mutex);
  2838. list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
  2839. u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
  2840. int i;
  2841. for (i = 0; i < ARRAY_SIZE(handle); i++)
  2842. if (nfit_handle == handle[i])
  2843. dev_set_drvdata(nfit_test->dimm_dev[i],
  2844. nfit_mem);
  2845. }
  2846. mutex_unlock(&acpi_desc->init_mutex);
  2847. return 0;
  2848. }
/*
 * nfit_test_remove - platform driver remove hook.
 *
 * All per-instance resources are devm-managed or torn down at module
 * exit, so there is nothing to do here.
 */
static int nfit_test_remove(struct platform_device *pdev)
{
	return 0;
}
  2853. static void nfit_test_release(struct device *dev)
  2854. {
  2855. struct nfit_test *nfit_test = to_nfit_test(dev);
  2856. kfree(nfit_test);
  2857. }
/* Match table: bind on the module name (all instances share it). */
static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};
/* Platform driver servicing the faux devices registered at init. */
static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.remove = nfit_test_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};
  2870. static __init int nfit_test_init(void)
  2871. {
  2872. int rc, i;
  2873. pmem_test();
  2874. libnvdimm_test();
  2875. acpi_nfit_test();
  2876. device_dax_test();
  2877. dax_pmem_test();
  2878. nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
  2879. nfit_wq = create_singlethread_workqueue("nfit");
  2880. if (!nfit_wq)
  2881. return -ENOMEM;
  2882. nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
  2883. if (IS_ERR(nfit_test_dimm)) {
  2884. rc = PTR_ERR(nfit_test_dimm);
  2885. goto err_register;
  2886. }
  2887. nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
  2888. if (!nfit_pool) {
  2889. rc = -ENOMEM;
  2890. goto err_register;
  2891. }
  2892. if (gen_pool_add(nfit_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
  2893. rc = -ENOMEM;
  2894. goto err_register;
  2895. }
  2896. for (i = 0; i < NUM_NFITS; i++) {
  2897. struct nfit_test *nfit_test;
  2898. struct platform_device *pdev;
  2899. nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
  2900. if (!nfit_test) {
  2901. rc = -ENOMEM;
  2902. goto err_register;
  2903. }
  2904. INIT_LIST_HEAD(&nfit_test->resources);
  2905. badrange_init(&nfit_test->badrange);
  2906. switch (i) {
  2907. case 0:
  2908. nfit_test->num_pm = NUM_PM;
  2909. nfit_test->dcr_idx = 0;
  2910. nfit_test->num_dcr = NUM_DCR;
  2911. nfit_test->alloc = nfit_test0_alloc;
  2912. nfit_test->setup = nfit_test0_setup;
  2913. break;
  2914. case 1:
  2915. nfit_test->num_pm = 2;
  2916. nfit_test->dcr_idx = NUM_DCR;
  2917. nfit_test->num_dcr = 2;
  2918. nfit_test->alloc = nfit_test1_alloc;
  2919. nfit_test->setup = nfit_test1_setup;
  2920. break;
  2921. default:
  2922. rc = -EINVAL;
  2923. goto err_register;
  2924. }
  2925. pdev = &nfit_test->pdev;
  2926. pdev->name = KBUILD_MODNAME;
  2927. pdev->id = i;
  2928. pdev->dev.release = nfit_test_release;
  2929. rc = platform_device_register(pdev);
  2930. if (rc) {
  2931. put_device(&pdev->dev);
  2932. goto err_register;
  2933. }
  2934. get_device(&pdev->dev);
  2935. rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  2936. if (rc)
  2937. goto err_register;
  2938. instances[i] = nfit_test;
  2939. INIT_WORK(&nfit_test->work, uc_error_notify);
  2940. }
  2941. rc = platform_driver_register(&nfit_test_driver);
  2942. if (rc)
  2943. goto err_register;
  2944. return 0;
  2945. err_register:
  2946. if (nfit_pool)
  2947. gen_pool_destroy(nfit_pool);
  2948. destroy_workqueue(nfit_wq);
  2949. for (i = 0; i < NUM_NFITS; i++)
  2950. if (instances[i])
  2951. platform_device_unregister(&instances[i]->pdev);
  2952. nfit_test_teardown();
  2953. for (i = 0; i < NUM_NFITS; i++)
  2954. if (instances[i])
  2955. put_device(&instances[i]->pdev.dev);
  2956. return rc;
  2957. }
/*
 * nfit_test_exit - module exit: tear down in reverse of init/probe.
 */
static __exit void nfit_test_exit(void)
{
	int i;

	destroy_workqueue(nfit_wq);
	/* unregister devices before the driver so ->remove() still runs */
	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	platform_driver_unregister(&nfit_test_driver);
	nfit_test_teardown();

	gen_pool_destroy(nfit_pool);
	/* drop the references taken via get_device() in nfit_test_init() */
	for (i = 0; i < NUM_NFITS; i++)
		put_device(&instances[i]->pdev.dev);
	class_destroy(nfit_test_dimm);
}
  2971. module_init(nfit_test_init);
  2972. module_exit(nfit_test_exit);
  2973. MODULE_LICENSE("GPL v2");
  2974. MODULE_AUTHOR("Intel Corporation");