interrupt.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * handling kvm guest interrupts
  4. *
  5. * Copyright IBM Corp. 2008, 2020
  6. *
  7. * Author(s): Carsten Otte <[email protected]>
  8. */
  9. #define KMSG_COMPONENT "kvm-s390"
  10. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  11. #include <linux/interrupt.h>
  12. #include <linux/kvm_host.h>
  13. #include <linux/hrtimer.h>
  14. #include <linux/mmu_context.h>
  15. #include <linux/nospec.h>
  16. #include <linux/signal.h>
  17. #include <linux/slab.h>
  18. #include <linux/bitmap.h>
  19. #include <linux/vmalloc.h>
  20. #include <asm/asm-offsets.h>
  21. #include <asm/dis.h>
  22. #include <linux/uaccess.h>
  23. #include <asm/sclp.h>
  24. #include <asm/isc.h>
  25. #include <asm/gmap.h>
  26. #include <asm/switch_to.h>
  27. #include <asm/nmi.h>
  28. #include <asm/airq.h>
  29. #include <asm/tpi.h>
  30. #include "kvm-s390.h"
  31. #include "gaccess.h"
  32. #include "trace-s390.h"
  33. #include "pci.h"
  34. #define PFAULT_INIT 0x0600
  35. #define PFAULT_DONE 0x0680
  36. #define VIRTIO_PARAM 0x0d00
  37. static struct kvm_s390_gib *gib;
  38. /* handle external calls via sigp interpretation facility */
  39. static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
  40. {
  41. int c, scn;
  42. if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
  43. return 0;
  44. BUG_ON(!kvm_s390_use_sca_entries());
  45. read_lock(&vcpu->kvm->arch.sca_lock);
  46. if (vcpu->kvm->arch.use_esca) {
  47. struct esca_block *sca = vcpu->kvm->arch.sca;
  48. union esca_sigp_ctrl sigp_ctrl =
  49. sca->cpu[vcpu->vcpu_id].sigp_ctrl;
  50. c = sigp_ctrl.c;
  51. scn = sigp_ctrl.scn;
  52. } else {
  53. struct bsca_block *sca = vcpu->kvm->arch.sca;
  54. union bsca_sigp_ctrl sigp_ctrl =
  55. sca->cpu[vcpu->vcpu_id].sigp_ctrl;
  56. c = sigp_ctrl.c;
  57. scn = sigp_ctrl.scn;
  58. }
  59. read_unlock(&vcpu->kvm->arch.sca_lock);
  60. if (src_id)
  61. *src_id = scn;
  62. return c;
  63. }
  64. static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
  65. {
  66. int expect, rc;
  67. BUG_ON(!kvm_s390_use_sca_entries());
  68. read_lock(&vcpu->kvm->arch.sca_lock);
  69. if (vcpu->kvm->arch.use_esca) {
  70. struct esca_block *sca = vcpu->kvm->arch.sca;
  71. union esca_sigp_ctrl *sigp_ctrl =
  72. &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
  73. union esca_sigp_ctrl new_val = {0}, old_val;
  74. old_val = READ_ONCE(*sigp_ctrl);
  75. new_val.scn = src_id;
  76. new_val.c = 1;
  77. old_val.c = 0;
  78. expect = old_val.value;
  79. rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
  80. } else {
  81. struct bsca_block *sca = vcpu->kvm->arch.sca;
  82. union bsca_sigp_ctrl *sigp_ctrl =
  83. &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
  84. union bsca_sigp_ctrl new_val = {0}, old_val;
  85. old_val = READ_ONCE(*sigp_ctrl);
  86. new_val.scn = src_id;
  87. new_val.c = 1;
  88. old_val.c = 0;
  89. expect = old_val.value;
  90. rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
  91. }
  92. read_unlock(&vcpu->kvm->arch.sca_lock);
  93. if (rc != expect) {
  94. /* another external call is pending */
  95. return -EBUSY;
  96. }
  97. kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
  98. return 0;
  99. }
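/*
 * Note that old_val.c is forced to 0 before the compare, so the
 * cmpxchg() above only succeeds when no external call was marked
 * pending at that moment; if another CPU won the race, cmpxchg()
 * returns a value different from "expect" and the injection is
 * rejected with -EBUSY.
 */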
  100. static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
  101. {
  102. int rc, expect;
  103. if (!kvm_s390_use_sca_entries())
  104. return;
  105. kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
  106. read_lock(&vcpu->kvm->arch.sca_lock);
  107. if (vcpu->kvm->arch.use_esca) {
  108. struct esca_block *sca = vcpu->kvm->arch.sca;
  109. union esca_sigp_ctrl *sigp_ctrl =
  110. &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
  111. union esca_sigp_ctrl old;
  112. old = READ_ONCE(*sigp_ctrl);
  113. expect = old.value;
  114. rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
  115. } else {
  116. struct bsca_block *sca = vcpu->kvm->arch.sca;
  117. union bsca_sigp_ctrl *sigp_ctrl =
  118. &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
  119. union bsca_sigp_ctrl old;
  120. old = READ_ONCE(*sigp_ctrl);
  121. expect = old.value;
  122. rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
  123. }
  124. read_unlock(&vcpu->kvm->arch.sca_lock);
  125. WARN_ON(rc != expect); /* cannot clear? */
  126. }
  127. int psw_extint_disabled(struct kvm_vcpu *vcpu)
  128. {
  129. return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
  130. }
  131. static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
  132. {
  133. return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
  134. }
  135. static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
  136. {
  137. return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
  138. }
  139. static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
  140. {
  141. return psw_extint_disabled(vcpu) &&
  142. psw_ioint_disabled(vcpu) &&
  143. psw_mchk_disabled(vcpu);
  144. }
  145. static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
  146. {
  147. if (psw_extint_disabled(vcpu) ||
  148. !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
  149. return 0;
  150. if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
  151. /* No timer interrupts when single stepping */
  152. return 0;
  153. return 1;
  154. }
  155. static int ckc_irq_pending(struct kvm_vcpu *vcpu)
  156. {
  157. const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
  158. const u64 ckc = vcpu->arch.sie_block->ckc;
  159. if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
  160. if ((s64)ckc >= (s64)now)
  161. return 0;
  162. } else if (ckc >= now) {
  163. return 0;
  164. }
  165. return ckc_interrupts_enabled(vcpu);
  166. }
  167. static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
  168. {
  169. return !psw_extint_disabled(vcpu) &&
  170. (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
  171. }
  172. static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
  173. {
  174. if (!cpu_timer_interrupts_enabled(vcpu))
  175. return 0;
  176. return kvm_s390_get_cpu_timer(vcpu) >> 63;
  177. }
  178. static uint64_t isc_to_isc_bits(int isc)
  179. {
  180. return (0x80 >> isc) << 24;
  181. }
  182. static inline u32 isc_to_int_word(u8 isc)
  183. {
  184. return ((u32)isc << 27) | 0x80000000;
  185. }
  186. static inline u8 int_word_to_isc(u32 int_word)
  187. {
  188. return (int_word & 0x38000000) >> 27;
  189. }
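/*
 * Illustrative round trip: for ISC 5, isc_to_int_word(5) yields
 * 0x80000000 | (5 << 27) = 0xa8000000, and int_word_to_isc(0xa8000000)
 * recovers 5. The constant 0x80000000 is the fixed part of the
 * interruption word as built here for adapter I/O interrupts.
 */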
  190. /*
  191. * To use atomic bitmap functions, we have to provide a bitmap address
  192. * that is u64 aligned. However, the ipm might be u32 aligned.
  193. * Therefore, we logically start the bitmap at the very beginning of the
  194. * struct and fixup the bit number.
  195. */
  196. #define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
  197. /**
  198. * gisa_set_iam - change the GISA interruption alert mask
  199. *
  200. * @gisa: gisa to operate on
  201. * @iam: new IAM value to use
  202. *
  203. * Change the IAM atomically with the next alert address and the IPM
  204. * of the GISA if the GISA is not part of the GIB alert list. All three
  205. * fields are located in the first long word of the GISA.
  206. *
  207. * Returns: 0 on success
  208. * -EBUSY in case the gisa is part of the alert list
  209. */
  210. static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
  211. {
  212. u64 word, _word;
  213. do {
  214. word = READ_ONCE(gisa->u64.word[0]);
  215. if ((u64)gisa != word >> 32)
  216. return -EBUSY;
  217. _word = (word & ~0xffUL) | iam;
  218. } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
  219. return 0;
  220. }
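/*
 * As used by the helpers in this block, the first 64-bit word of the
 * GISA is laid out as follows: the upper 32 bits hold the next alert
 * address (which points back at the GISA itself as long as it is not
 * queued on the alert list, see gisa_in_alert_list()), bits 24-31 hold
 * the IPM and the lowest byte holds the IAM. That is why gisa_set_iam()
 * returns -EBUSY as soon as the next alert address no longer matches
 * the GISA address.
 */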
  221. /**
  222. * gisa_clear_ipm - clear the GISA interruption pending mask
  223. *
  224. * @gisa: gisa to operate on
  225. *
  226. * Clear the IPM atomically with the next alert address and the IAM
  227. * of the GISA unconditionally. All three fields are located in the
  228. * first long word of the GISA.
  229. */
  230. static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
  231. {
  232. u64 word, _word;
  233. do {
  234. word = READ_ONCE(gisa->u64.word[0]);
  235. _word = word & ~(0xffUL << 24);
  236. } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
  237. }
  238. /**
  239. * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
  240. *
  241. * @gi: gisa interrupt struct to work on
  242. *
  243. * Atomically restores the interruption alert mask if none of the
  244. * relevant ISCs are pending and return the IPM.
  245. *
  246. * Returns: the relevant pending ISCs
  247. */
  248. static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
  249. {
  250. u8 pending_mask, alert_mask;
  251. u64 word, _word;
  252. do {
  253. word = READ_ONCE(gi->origin->u64.word[0]);
  254. alert_mask = READ_ONCE(gi->alert.mask);
  255. pending_mask = (u8)(word >> 24) & alert_mask;
  256. if (pending_mask)
  257. return pending_mask;
  258. _word = (word & ~0xffUL) | alert_mask;
  259. } while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);
  260. return 0;
  261. }
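/*
 * Illustrative usage, mirroring kvm_s390_handle_wait() further down:
 * before a VCPU goes idle, the IAM is restored so that new interrupts
 * raise a GIB alert, unless an ISC enabled by the guest in CR6 is
 * already pending:
 *
 *	if (gi->origin &&
 *	    (gisa_get_ipm_or_restore_iam(gi) &
 *	     vcpu->arch.sie_block->gcr[6] >> 24))
 *		return 0;
 */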
  262. static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
  263. {
  264. return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
  265. }
  266. static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
  267. {
  268. set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
  269. }
  270. static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
  271. {
  272. return READ_ONCE(gisa->ipm);
  273. }
  274. static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
  275. {
  276. clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
  277. }
  278. static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
  279. {
  280. return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
  281. }
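/*
 * With the inverted (MSB-first) bit numbering of the *_inv bitops and
 * the IPM_BIT_OFFSET fixup described above, ISC n lands in bit
 * (0x80 >> n) of the ipm byte, consistent with isc_to_isc_bits() and
 * with the "gcr[6] >> 24" comparisons made against the IPM elsewhere
 * in this file.
 */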
  282. static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
  283. {
  284. unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs |
  285. vcpu->arch.local_int.pending_irqs;
  286. pending &= ~vcpu->kvm->arch.float_int.masked_irqs;
  287. return pending;
  288. }
  289. static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
  290. {
  291. struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
  292. unsigned long pending_mask;
  293. pending_mask = pending_irqs_no_gisa(vcpu);
  294. if (gi->origin)
  295. pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
  296. return pending_mask;
  297. }
  298. static inline int isc_to_irq_type(unsigned long isc)
  299. {
  300. return IRQ_PEND_IO_ISC_0 - isc;
  301. }
  302. static inline int irq_type_to_isc(unsigned long irq_type)
  303. {
  304. return IRQ_PEND_IO_ISC_0 - irq_type;
  305. }
  306. static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
  307. unsigned long active_mask)
  308. {
  309. int i;
  310. for (i = 0; i <= MAX_ISC; i++)
  311. if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
  312. active_mask &= ~(1UL << (isc_to_irq_type(i)));
  313. return active_mask;
  314. }
  315. static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
  316. {
  317. unsigned long active_mask;
  318. active_mask = pending_irqs(vcpu);
  319. if (!active_mask)
  320. return 0;
  321. if (psw_extint_disabled(vcpu))
  322. active_mask &= ~IRQ_PEND_EXT_MASK;
  323. if (psw_ioint_disabled(vcpu))
  324. active_mask &= ~IRQ_PEND_IO_MASK;
  325. else
  326. active_mask = disable_iscs(vcpu, active_mask);
  327. if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
  328. __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
  329. if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
  330. __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
  331. if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
  332. __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
  333. if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
  334. __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
  335. if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) {
  336. __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
  337. __clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask);
  338. }
  339. if (psw_mchk_disabled(vcpu))
  340. active_mask &= ~IRQ_PEND_MCHK_MASK;
   341. /* A PV guest cpu can have only a single interruption injected at a time. */
  342. if (kvm_s390_pv_cpu_get_handle(vcpu) &&
  343. vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
  344. active_mask &= ~(IRQ_PEND_EXT_II_MASK |
  345. IRQ_PEND_IO_MASK |
  346. IRQ_PEND_MCHK_MASK);
  347. /*
   348. * Check both the floating and the local interrupts' cr14 because
  349. * bit IRQ_PEND_MCHK_REP could be set in both cases.
  350. */
  351. if (!(vcpu->arch.sie_block->gcr[14] &
  352. (vcpu->kvm->arch.float_int.mchk.cr14 |
  353. vcpu->arch.local_int.irq.mchk.cr14)))
  354. __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
  355. /*
  356. * STOP irqs will never be actively delivered. They are triggered via
  357. * intercept requests and cleared when the stop intercept is performed.
  358. */
  359. __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);
  360. return active_mask;
  361. }
  362. static void __set_cpu_idle(struct kvm_vcpu *vcpu)
  363. {
  364. kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
  365. set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
  366. }
  367. static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
  368. {
  369. kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
  370. clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
  371. }
  372. static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
  373. {
  374. kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
  375. CPUSTAT_STOP_INT);
  376. vcpu->arch.sie_block->lctl = 0x0000;
  377. vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
  378. if (guestdbg_enabled(vcpu)) {
  379. vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
  380. LCTL_CR10 | LCTL_CR11);
  381. vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
  382. }
  383. }
  384. static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
  385. {
  386. if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
  387. return;
  388. if (psw_ioint_disabled(vcpu))
  389. kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
  390. else
  391. vcpu->arch.sie_block->lctl |= LCTL_CR6;
  392. }
  393. static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
  394. {
  395. if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
  396. return;
  397. if (psw_extint_disabled(vcpu))
  398. kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
  399. else
  400. vcpu->arch.sie_block->lctl |= LCTL_CR0;
  401. }
  402. static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
  403. {
  404. if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
  405. return;
  406. if (psw_mchk_disabled(vcpu))
  407. vcpu->arch.sie_block->ictl |= ICTL_LPSW;
  408. else
  409. vcpu->arch.sie_block->lctl |= LCTL_CR14;
  410. }
  411. static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
  412. {
  413. if (kvm_s390_is_stop_irq_pending(vcpu))
  414. kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
  415. }
  416. /* Set interception request for non-deliverable interrupts */
  417. static void set_intercept_indicators(struct kvm_vcpu *vcpu)
  418. {
  419. set_intercept_indicators_io(vcpu);
  420. set_intercept_indicators_ext(vcpu);
  421. set_intercept_indicators_mchk(vcpu);
  422. set_intercept_indicators_stop(vcpu);
  423. }
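/*
 * Summary of the helpers above: when an interrupt class is pending but
 * currently not deliverable (masked in the PSW or in the control
 * registers), either a CPUSTAT_*_INT flag or an LCTL/ICTL intercept is
 * armed so that KVM regains control as soon as the guest re-enables
 * that class.
 */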
  424. static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
  425. {
  426. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  427. int rc = 0;
  428. vcpu->stat.deliver_cputm++;
  429. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
  430. 0, 0);
  431. if (kvm_s390_pv_cpu_is_protected(vcpu)) {
  432. vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
  433. vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER;
  434. } else {
  435. rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
  436. (u16 *)__LC_EXT_INT_CODE);
  437. rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
  438. rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
  439. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  440. rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
  441. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  442. }
  443. clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
  444. return rc ? -EFAULT : 0;
  445. }
  446. static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
  447. {
  448. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  449. int rc = 0;
  450. vcpu->stat.deliver_ckc++;
  451. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
  452. 0, 0);
  453. if (kvm_s390_pv_cpu_is_protected(vcpu)) {
  454. vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
  455. vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP;
  456. } else {
  457. rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
  458. (u16 __user *)__LC_EXT_INT_CODE);
  459. rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
  460. rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
  461. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  462. rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
  463. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  464. }
  465. clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
  466. return rc ? -EFAULT : 0;
  467. }
  468. static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
  469. {
  470. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  471. struct kvm_s390_ext_info ext;
  472. int rc;
  473. spin_lock(&li->lock);
  474. ext = li->irq.ext;
  475. clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
  476. li->irq.ext.ext_params2 = 0;
  477. spin_unlock(&li->lock);
  478. VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
  479. ext.ext_params2);
  480. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
  481. KVM_S390_INT_PFAULT_INIT,
  482. 0, ext.ext_params2);
  483. rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
  484. rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
  485. rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
  486. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  487. rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
  488. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  489. rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
  490. return rc ? -EFAULT : 0;
  491. }
  492. static int __write_machine_check(struct kvm_vcpu *vcpu,
  493. struct kvm_s390_mchk_info *mchk)
  494. {
  495. unsigned long ext_sa_addr;
  496. unsigned long lc;
  497. freg_t fprs[NUM_FPRS];
  498. union mci mci;
  499. int rc;
  500. /*
  501. * All other possible payload for a machine check (e.g. the register
  502. * contents in the save area) will be handled by the ultravisor, as
   503. * the hypervisor does not have the needed information for
  504. * protected guests.
  505. */
  506. if (kvm_s390_pv_cpu_is_protected(vcpu)) {
  507. vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK;
  508. vcpu->arch.sie_block->mcic = mchk->mcic;
  509. vcpu->arch.sie_block->faddr = mchk->failing_storage_address;
  510. vcpu->arch.sie_block->edc = mchk->ext_damage_code;
  511. return 0;
  512. }
  513. mci.val = mchk->mcic;
  514. /* take care of lazy register loading */
  515. save_fpu_regs();
  516. save_access_regs(vcpu->run->s.regs.acrs);
  517. if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
  518. save_gs_cb(current->thread.gs_cb);
  519. /* Extended save area */
  520. rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
  521. sizeof(unsigned long));
  522. /* Only bits 0 through 63-LC are used for address formation */
  523. lc = ext_sa_addr & MCESA_LC_MASK;
  524. if (test_kvm_facility(vcpu->kvm, 133)) {
  525. switch (lc) {
  526. case 0:
  527. case 10:
  528. ext_sa_addr &= ~0x3ffUL;
  529. break;
  530. case 11:
  531. ext_sa_addr &= ~0x7ffUL;
  532. break;
  533. case 12:
  534. ext_sa_addr &= ~0xfffUL;
  535. break;
  536. default:
  537. ext_sa_addr = 0;
  538. break;
  539. }
  540. } else {
  541. ext_sa_addr &= ~0x3ffUL;
  542. }
  543. if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
  544. if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
  545. 512))
  546. mci.vr = 0;
  547. } else {
  548. mci.vr = 0;
  549. }
  550. if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
  551. && (lc == 11 || lc == 12)) {
  552. if (write_guest_abs(vcpu, ext_sa_addr + 1024,
  553. &vcpu->run->s.regs.gscb, 32))
  554. mci.gs = 0;
  555. } else {
  556. mci.gs = 0;
  557. }
  558. /* General interruption information */
  559. rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
  560. rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
  561. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  562. rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
  563. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  564. rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
  565. /* Register-save areas */
  566. if (MACHINE_HAS_VX) {
  567. convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
  568. rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
  569. } else {
  570. rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
  571. vcpu->run->s.regs.fprs, 128);
  572. }
  573. rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
  574. vcpu->run->s.regs.gprs, 128);
  575. rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
  576. (u32 __user *) __LC_FP_CREG_SAVE_AREA);
  577. rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
  578. (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
  579. rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
  580. (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
  581. rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
  582. (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
  583. rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
  584. &vcpu->run->s.regs.acrs, 64);
  585. rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
  586. &vcpu->arch.sie_block->gcr, 128);
  587. /* Extended interruption information */
  588. rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
  589. (u32 __user *) __LC_EXT_DAMAGE_CODE);
  590. rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
  591. (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
  592. rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
  593. sizeof(mchk->fixed_logout));
  594. return rc ? -EFAULT : 0;
  595. }
  596. static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
  597. {
  598. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  599. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  600. struct kvm_s390_mchk_info mchk = {};
  601. int deliver = 0;
  602. int rc = 0;
  603. spin_lock(&fi->lock);
  604. spin_lock(&li->lock);
  605. if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
  606. test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
  607. /*
  608. * If there was an exigent machine check pending, then any
  609. * repressible machine checks that might have been pending
  610. * are indicated along with it, so always clear bits for
  611. * repressible and exigent interrupts
  612. */
  613. mchk = li->irq.mchk;
  614. clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
  615. clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
  616. memset(&li->irq.mchk, 0, sizeof(mchk));
  617. deliver = 1;
  618. }
  619. /*
  620. * We indicate floating repressible conditions along with
  621. * other pending conditions. Channel Report Pending and Channel
  622. * Subsystem damage are the only two and are indicated by
  623. * bits in mcic and masked in cr14.
  624. */
  625. if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
  626. mchk.mcic |= fi->mchk.mcic;
  627. mchk.cr14 |= fi->mchk.cr14;
  628. memset(&fi->mchk, 0, sizeof(mchk));
  629. deliver = 1;
  630. }
  631. spin_unlock(&li->lock);
  632. spin_unlock(&fi->lock);
  633. if (deliver) {
  634. VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
  635. mchk.mcic);
  636. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
  637. KVM_S390_MCHK,
  638. mchk.cr14, mchk.mcic);
  639. vcpu->stat.deliver_machine_check++;
  640. rc = __write_machine_check(vcpu, &mchk);
  641. }
  642. return rc;
  643. }
  644. static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
  645. {
  646. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  647. int rc = 0;
  648. VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
  649. vcpu->stat.deliver_restart_signal++;
  650. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
  651. if (kvm_s390_pv_cpu_is_protected(vcpu)) {
  652. vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART;
  653. } else {
  654. rc = write_guest_lc(vcpu,
  655. offsetof(struct lowcore, restart_old_psw),
  656. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  657. rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
  658. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  659. }
  660. clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
  661. return rc ? -EFAULT : 0;
  662. }
  663. static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
  664. {
  665. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  666. struct kvm_s390_prefix_info prefix;
  667. spin_lock(&li->lock);
  668. prefix = li->irq.prefix;
  669. li->irq.prefix.address = 0;
  670. clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
  671. spin_unlock(&li->lock);
  672. vcpu->stat.deliver_prefix_signal++;
  673. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
  674. KVM_S390_SIGP_SET_PREFIX,
  675. prefix.address, 0);
  676. kvm_s390_set_prefix(vcpu, prefix.address);
  677. return 0;
  678. }
  679. static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
  680. {
  681. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  682. int rc;
  683. int cpu_addr;
  684. spin_lock(&li->lock);
  685. cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
  686. clear_bit(cpu_addr, li->sigp_emerg_pending);
  687. if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
  688. clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
  689. spin_unlock(&li->lock);
  690. VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
  691. vcpu->stat.deliver_emergency_signal++;
  692. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
  693. cpu_addr, 0);
  694. if (kvm_s390_pv_cpu_is_protected(vcpu)) {
  695. vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
  696. vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG;
  697. vcpu->arch.sie_block->extcpuaddr = cpu_addr;
  698. return 0;
  699. }
  700. rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
  701. (u16 *)__LC_EXT_INT_CODE);
  702. rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
  703. rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
  704. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  705. rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
  706. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  707. return rc ? -EFAULT : 0;
  708. }
  709. static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
  710. {
  711. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  712. struct kvm_s390_extcall_info extcall;
  713. int rc;
  714. spin_lock(&li->lock);
  715. extcall = li->irq.extcall;
  716. li->irq.extcall.code = 0;
  717. clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
  718. spin_unlock(&li->lock);
  719. VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
  720. vcpu->stat.deliver_external_call++;
  721. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
  722. KVM_S390_INT_EXTERNAL_CALL,
  723. extcall.code, 0);
  724. if (kvm_s390_pv_cpu_is_protected(vcpu)) {
  725. vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
  726. vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL;
  727. vcpu->arch.sie_block->extcpuaddr = extcall.code;
  728. return 0;
  729. }
  730. rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
  731. (u16 *)__LC_EXT_INT_CODE);
  732. rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
  733. rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
  734. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  735. rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
  736. sizeof(psw_t));
  737. return rc ? -EFAULT : 0;
  738. }
  739. static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code)
  740. {
  741. switch (code) {
  742. case PGM_SPECIFICATION:
  743. vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION;
  744. break;
  745. case PGM_OPERAND:
  746. vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND;
  747. break;
  748. default:
  749. return -EINVAL;
  750. }
  751. return 0;
  752. }
  753. static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
  754. {
  755. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  756. struct kvm_s390_pgm_info pgm_info;
  757. int rc = 0, nullifying = false;
  758. u16 ilen;
  759. spin_lock(&li->lock);
  760. pgm_info = li->irq.pgm;
  761. clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
  762. memset(&li->irq.pgm, 0, sizeof(pgm_info));
  763. spin_unlock(&li->lock);
  764. ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
  765. VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
  766. pgm_info.code, ilen);
  767. vcpu->stat.deliver_program++;
  768. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
  769. pgm_info.code, 0);
  770. /* PER is handled by the ultravisor */
  771. if (kvm_s390_pv_cpu_is_protected(vcpu))
  772. return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER);
  773. switch (pgm_info.code & ~PGM_PER) {
  774. case PGM_AFX_TRANSLATION:
  775. case PGM_ASX_TRANSLATION:
  776. case PGM_EX_TRANSLATION:
  777. case PGM_LFX_TRANSLATION:
  778. case PGM_LSTE_SEQUENCE:
  779. case PGM_LSX_TRANSLATION:
  780. case PGM_LX_TRANSLATION:
  781. case PGM_PRIMARY_AUTHORITY:
  782. case PGM_SECONDARY_AUTHORITY:
  783. nullifying = true;
  784. fallthrough;
  785. case PGM_SPACE_SWITCH:
  786. rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
  787. (u64 *)__LC_TRANS_EXC_CODE);
  788. break;
  789. case PGM_ALEN_TRANSLATION:
  790. case PGM_ALE_SEQUENCE:
  791. case PGM_ASTE_INSTANCE:
  792. case PGM_ASTE_SEQUENCE:
  793. case PGM_ASTE_VALIDITY:
  794. case PGM_EXTENDED_AUTHORITY:
  795. rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
  796. (u8 *)__LC_EXC_ACCESS_ID);
  797. nullifying = true;
  798. break;
  799. case PGM_ASCE_TYPE:
  800. case PGM_PAGE_TRANSLATION:
  801. case PGM_REGION_FIRST_TRANS:
  802. case PGM_REGION_SECOND_TRANS:
  803. case PGM_REGION_THIRD_TRANS:
  804. case PGM_SEGMENT_TRANSLATION:
  805. rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
  806. (u64 *)__LC_TRANS_EXC_CODE);
  807. rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
  808. (u8 *)__LC_EXC_ACCESS_ID);
  809. rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
  810. (u8 *)__LC_OP_ACCESS_ID);
  811. nullifying = true;
  812. break;
  813. case PGM_MONITOR:
  814. rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
  815. (u16 *)__LC_MON_CLASS_NR);
  816. rc |= put_guest_lc(vcpu, pgm_info.mon_code,
  817. (u64 *)__LC_MON_CODE);
  818. break;
  819. case PGM_VECTOR_PROCESSING:
  820. case PGM_DATA:
  821. rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
  822. (u32 *)__LC_DATA_EXC_CODE);
  823. break;
  824. case PGM_PROTECTION:
  825. rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
  826. (u64 *)__LC_TRANS_EXC_CODE);
  827. rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
  828. (u8 *)__LC_EXC_ACCESS_ID);
  829. break;
  830. case PGM_STACK_FULL:
  831. case PGM_STACK_EMPTY:
  832. case PGM_STACK_SPECIFICATION:
  833. case PGM_STACK_TYPE:
  834. case PGM_STACK_OPERATION:
  835. case PGM_TRACE_TABEL:
  836. case PGM_CRYPTO_OPERATION:
  837. nullifying = true;
  838. break;
  839. }
  840. if (pgm_info.code & PGM_PER) {
  841. rc |= put_guest_lc(vcpu, pgm_info.per_code,
  842. (u8 *) __LC_PER_CODE);
  843. rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
  844. (u8 *)__LC_PER_ATMID);
  845. rc |= put_guest_lc(vcpu, pgm_info.per_address,
  846. (u64 *) __LC_PER_ADDRESS);
  847. rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
  848. (u8 *) __LC_PER_ACCESS_ID);
  849. }
  850. if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
  851. kvm_s390_rewind_psw(vcpu, ilen);
  852. /* bit 1+2 of the target are the ilc, so we can directly use ilen */
  853. rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
  854. rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
  855. (u64 *) __LC_PGM_LAST_BREAK);
  856. rc |= put_guest_lc(vcpu, pgm_info.code,
  857. (u16 *)__LC_PGM_INT_CODE);
  858. rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
  859. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  860. rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
  861. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  862. return rc ? -EFAULT : 0;
  863. }
  864. #define SCCB_MASK 0xFFFFFFF8
  865. #define SCCB_EVENT_PENDING 0x3
  866. static int write_sclp(struct kvm_vcpu *vcpu, u32 parm)
  867. {
  868. int rc;
  869. if (kvm_s390_pv_cpu_get_handle(vcpu)) {
  870. vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
  871. vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG;
  872. vcpu->arch.sie_block->eiparams = parm;
  873. return 0;
  874. }
  875. rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
  876. rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
  877. rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
  878. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  879. rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
  880. &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
  881. rc |= put_guest_lc(vcpu, parm,
  882. (u32 *)__LC_EXT_PARAMS);
  883. return rc ? -EFAULT : 0;
  884. }
  885. static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
  886. {
  887. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  888. struct kvm_s390_ext_info ext;
  889. spin_lock(&fi->lock);
  890. if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) ||
  891. !(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
  892. spin_unlock(&fi->lock);
  893. return 0;
  894. }
  895. ext = fi->srv_signal;
  896. memset(&fi->srv_signal, 0, sizeof(ext));
  897. clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
  898. clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
  899. if (kvm_s390_pv_cpu_is_protected(vcpu))
  900. set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
  901. spin_unlock(&fi->lock);
  902. VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
  903. ext.ext_params);
  904. vcpu->stat.deliver_service_signal++;
  905. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
  906. ext.ext_params, 0);
  907. return write_sclp(vcpu, ext.ext_params);
  908. }
  909. static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
  910. {
  911. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  912. struct kvm_s390_ext_info ext;
  913. spin_lock(&fi->lock);
  914. if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) {
  915. spin_unlock(&fi->lock);
  916. return 0;
  917. }
  918. ext = fi->srv_signal;
  919. /* only clear the event bit */
  920. fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING;
  921. clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
  922. spin_unlock(&fi->lock);
  923. VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event");
  924. vcpu->stat.deliver_service_signal++;
  925. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
  926. ext.ext_params, 0);
  927. return write_sclp(vcpu, SCCB_EVENT_PENDING);
  928. }
  929. static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
  930. {
  931. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  932. struct kvm_s390_interrupt_info *inti;
  933. int rc = 0;
  934. spin_lock(&fi->lock);
  935. inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
  936. struct kvm_s390_interrupt_info,
  937. list);
  938. if (inti) {
  939. list_del(&inti->list);
  940. fi->counters[FIRQ_CNTR_PFAULT] -= 1;
  941. }
  942. if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
  943. clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
  944. spin_unlock(&fi->lock);
  945. if (inti) {
  946. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
  947. KVM_S390_INT_PFAULT_DONE, 0,
  948. inti->ext.ext_params2);
  949. VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
  950. inti->ext.ext_params2);
  951. rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
  952. (u16 *)__LC_EXT_INT_CODE);
  953. rc |= put_guest_lc(vcpu, PFAULT_DONE,
  954. (u16 *)__LC_EXT_CPU_ADDR);
  955. rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
  956. &vcpu->arch.sie_block->gpsw,
  957. sizeof(psw_t));
  958. rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
  959. &vcpu->arch.sie_block->gpsw,
  960. sizeof(psw_t));
  961. rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
  962. (u64 *)__LC_EXT_PARAMS2);
  963. kfree(inti);
  964. }
  965. return rc ? -EFAULT : 0;
  966. }
  967. static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
  968. {
  969. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  970. struct kvm_s390_interrupt_info *inti;
  971. int rc = 0;
  972. spin_lock(&fi->lock);
  973. inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
  974. struct kvm_s390_interrupt_info,
  975. list);
  976. if (inti) {
  977. VCPU_EVENT(vcpu, 4,
  978. "deliver: virtio parm: 0x%x,parm64: 0x%llx",
  979. inti->ext.ext_params, inti->ext.ext_params2);
  980. vcpu->stat.deliver_virtio++;
  981. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
  982. inti->type,
  983. inti->ext.ext_params,
  984. inti->ext.ext_params2);
  985. list_del(&inti->list);
  986. fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
  987. }
  988. if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
  989. clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
  990. spin_unlock(&fi->lock);
  991. if (inti) {
  992. rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
  993. (u16 *)__LC_EXT_INT_CODE);
  994. rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
  995. (u16 *)__LC_EXT_CPU_ADDR);
  996. rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
  997. &vcpu->arch.sie_block->gpsw,
  998. sizeof(psw_t));
  999. rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
  1000. &vcpu->arch.sie_block->gpsw,
  1001. sizeof(psw_t));
  1002. rc |= put_guest_lc(vcpu, inti->ext.ext_params,
  1003. (u32 *)__LC_EXT_PARAMS);
  1004. rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
  1005. (u64 *)__LC_EXT_PARAMS2);
  1006. kfree(inti);
  1007. }
  1008. return rc ? -EFAULT : 0;
  1009. }
  1010. static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
  1011. {
  1012. int rc;
  1013. if (kvm_s390_pv_cpu_is_protected(vcpu)) {
  1014. vcpu->arch.sie_block->iictl = IICTL_CODE_IO;
  1015. vcpu->arch.sie_block->subchannel_id = io->subchannel_id;
  1016. vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr;
  1017. vcpu->arch.sie_block->io_int_parm = io->io_int_parm;
  1018. vcpu->arch.sie_block->io_int_word = io->io_int_word;
  1019. return 0;
  1020. }
  1021. rc = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
  1022. rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
  1023. rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
  1024. rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
  1025. rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
  1026. &vcpu->arch.sie_block->gpsw,
  1027. sizeof(psw_t));
  1028. rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
  1029. &vcpu->arch.sie_block->gpsw,
  1030. sizeof(psw_t));
  1031. return rc ? -EFAULT : 0;
  1032. }
  1033. static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
  1034. unsigned long irq_type)
  1035. {
  1036. struct list_head *isc_list;
  1037. struct kvm_s390_float_interrupt *fi;
  1038. struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
  1039. struct kvm_s390_interrupt_info *inti = NULL;
  1040. struct kvm_s390_io_info io;
  1041. u32 isc;
  1042. int rc = 0;
  1043. fi = &vcpu->kvm->arch.float_int;
  1044. spin_lock(&fi->lock);
  1045. isc = irq_type_to_isc(irq_type);
  1046. isc_list = &fi->lists[isc];
  1047. inti = list_first_entry_or_null(isc_list,
  1048. struct kvm_s390_interrupt_info,
  1049. list);
  1050. if (inti) {
  1051. if (inti->type & KVM_S390_INT_IO_AI_MASK)
  1052. VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
  1053. else
  1054. VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
  1055. inti->io.subchannel_id >> 8,
  1056. inti->io.subchannel_id >> 1 & 0x3,
  1057. inti->io.subchannel_nr);
  1058. vcpu->stat.deliver_io++;
  1059. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
  1060. inti->type,
  1061. ((__u32)inti->io.subchannel_id << 16) |
  1062. inti->io.subchannel_nr,
  1063. ((__u64)inti->io.io_int_parm << 32) |
  1064. inti->io.io_int_word);
  1065. list_del(&inti->list);
  1066. fi->counters[FIRQ_CNTR_IO] -= 1;
  1067. }
  1068. if (list_empty(isc_list))
  1069. clear_bit(irq_type, &fi->pending_irqs);
  1070. spin_unlock(&fi->lock);
  1071. if (inti) {
  1072. rc = __do_deliver_io(vcpu, &(inti->io));
  1073. kfree(inti);
  1074. goto out;
  1075. }
  1076. if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
  1077. /*
  1078. * in case an adapter interrupt was not delivered
  1079. * in SIE context KVM will handle the delivery
  1080. */
  1081. VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
  1082. memset(&io, 0, sizeof(io));
  1083. io.io_int_word = isc_to_int_word(isc);
  1084. vcpu->stat.deliver_io++;
  1085. trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
  1086. KVM_S390_INT_IO(1, 0, 0, 0),
  1087. ((__u32)io.subchannel_id << 16) |
  1088. io.subchannel_nr,
  1089. ((__u64)io.io_int_parm << 32) |
  1090. io.io_int_word);
  1091. rc = __do_deliver_io(vcpu, &io);
  1092. }
  1093. out:
  1094. return rc;
  1095. }
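/*
 * Note that __deliver_io() has two sources for a pending ISC: the
 * classic floating interrupt list for that ISC and, when a GISA is in
 * use, the IPM bit for the ISC. The GISA path carries no payload, so
 * only the interruption word can be reconstructed via
 * isc_to_int_word(); all other fields of the I/O information stay 0.
 */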
  1096. /* Check whether an external call is pending (deliverable or not) */
  1097. int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
  1098. {
  1099. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  1100. if (!sclp.has_sigpif)
  1101. return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
  1102. return sca_ext_call_pending(vcpu, NULL);
  1103. }
  1104. int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
  1105. {
  1106. if (deliverable_irqs(vcpu))
  1107. return 1;
  1108. if (kvm_cpu_has_pending_timer(vcpu))
  1109. return 1;
  1110. /* external call pending and deliverable */
  1111. if (kvm_s390_ext_call_pending(vcpu) &&
  1112. !psw_extint_disabled(vcpu) &&
  1113. (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
  1114. return 1;
  1115. if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
  1116. return 1;
  1117. return 0;
  1118. }
  1119. int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
  1120. {
  1121. return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
  1122. }
  1123. static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
  1124. {
  1125. const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
  1126. const u64 ckc = vcpu->arch.sie_block->ckc;
  1127. u64 cputm, sltime = 0;
  1128. if (ckc_interrupts_enabled(vcpu)) {
  1129. if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
  1130. if ((s64)now < (s64)ckc)
  1131. sltime = tod_to_ns((s64)ckc - (s64)now);
  1132. } else if (now < ckc) {
  1133. sltime = tod_to_ns(ckc - now);
  1134. }
  1135. /* already expired */
  1136. if (!sltime)
  1137. return 0;
  1138. if (cpu_timer_interrupts_enabled(vcpu)) {
  1139. cputm = kvm_s390_get_cpu_timer(vcpu);
  1140. /* already expired? */
  1141. if (cputm >> 63)
  1142. return 0;
  1143. return min_t(u64, sltime, tod_to_ns(cputm));
  1144. }
  1145. } else if (cpu_timer_interrupts_enabled(vcpu)) {
  1146. sltime = kvm_s390_get_cpu_timer(vcpu);
  1147. /* already expired? */
  1148. if (sltime >> 63)
  1149. return 0;
  1150. }
  1151. return sltime;
  1152. }
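/*
 * In short, __calculate_sltime() returns the number of nanoseconds
 * until the earlier of the clock comparator and the CPU timer fires,
 * or 0 if an enabled timer source has already expired and the caller
 * must not sleep at all.
 */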
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
	u64 sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (gi->origin &&
	    (gisa_get_ipm_or_restore_iam(gi) &
	     vcpu->arch.sie_block->gcr[6] >> 24))
		return 0;

	if (!ckc_interrupts_enabled(vcpu) &&
	    !cpu_timer_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	sltime = __calculate_sltime(vcpu);
	if (!sltime)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
	kvm_vcpu_srcu_read_unlock(vcpu);
	kvm_vcpu_halt(vcpu);
	vcpu->valid_wakeup = false;
	__unset_cpu_idle(vcpu);
	kvm_vcpu_srcu_read_lock(vcpu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = true;
	kvm_vcpu_wake_up(vcpu);

	/*
	 * The VCPU might not be sleeping but rather executing VSIE. Let's
	 * kick it, so it leaves the SIE to process the request.
	 */
	kvm_s390_vsie_kick(vcpu);
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}
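
/*
 * Deliver all deliverable pending interrupts to the guest, highest
 * priority first, until none is left or a delivery fails.
 */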
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the reverse order of interrupt priority */
		irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
		switch (irq_type) {
		case IRQ_PEND_IO_ISC_0:
		case IRQ_PEND_IO_ISC_1:
		case IRQ_PEND_IO_ISC_2:
		case IRQ_PEND_IO_ISC_3:
		case IRQ_PEND_IO_ISC_4:
		case IRQ_PEND_IO_ISC_5:
		case IRQ_PEND_IO_ISC_6:
		case IRQ_PEND_IO_ISC_7:
			rc = __deliver_io(vcpu, irq_type);
			break;
		case IRQ_PEND_MCHK_EX:
		case IRQ_PEND_MCHK_REP:
			rc = __deliver_machine_check(vcpu);
			break;
		case IRQ_PEND_PROG:
			rc = __deliver_prog(vcpu);
			break;
		case IRQ_PEND_EXT_EMERGENCY:
			rc = __deliver_emergency_signal(vcpu);
			break;
		case IRQ_PEND_EXT_EXTERNAL:
			rc = __deliver_external_call(vcpu);
			break;
		case IRQ_PEND_EXT_CLOCK_COMP:
			rc = __deliver_ckc(vcpu);
			break;
		case IRQ_PEND_EXT_CPU_TIMER:
			rc = __deliver_cpu_timer(vcpu);
			break;
		case IRQ_PEND_RESTART:
			rc = __deliver_restart(vcpu);
			break;
		case IRQ_PEND_SET_PREFIX:
			rc = __deliver_set_prefix(vcpu);
			break;
		case IRQ_PEND_PFAULT_INIT:
			rc = __deliver_pfault_init(vcpu);
			break;
		case IRQ_PEND_EXT_SERVICE:
			rc = __deliver_service(vcpu);
			break;
		case IRQ_PEND_EXT_SERVICE_EV:
			rc = __deliver_service_ev(vcpu);
			break;
		case IRQ_PEND_PFAULT_DONE:
			rc = __deliver_pfault_done(vcpu);
			break;
		case IRQ_PEND_VIRTIO:
			rc = __deliver_virtio(vcpu);
			break;
		default:
			WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
			clear_bit(irq_type, &li->pending_irqs);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}
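
/*
 * Queue a program interrupt for the VCPU. PER conditions and other
 * program conditions are merged so that one of each kind can be
 * pending at the same time.
 */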
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_program++;
	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_pfault_init++;
	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	vcpu->stat.inject_external_call++;
	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu))
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	vcpu->stat.inject_set_prefix++;
	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	vcpu->stat.inject_stop_signal++;
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_restart++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_emergency_signal++;
	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	vcpu->stat.inject_mchk++;
	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_ckc++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_cputm++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}
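
/*
 * Dequeue the first classic I/O interrupt on the given ISC list that
 * matches schid (or any entry if schid is 0), or return NULL.
 */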
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
						      u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}
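
/*
 * Return the highest priority ISC with a pending adapter interrupt in
 * the GISA that is also enabled in isc_mask, clearing its IPM bit, or
 * -EINVAL if there is none or a specific subchannel was requested.
 */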
static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	unsigned long active_mask;
	int isc;

	if (schid)
		goto out;
	if (!gi->origin)
		goto out;

	active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
	while (active_mask) {
		isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
		if (gisa_tac_ipm_gisc(gi->origin, isc))
			return isc;
		clear_bit_inv(isc, &active_mask);
	}
out:
	return -EINVAL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 * Take into account the interrupts pending in the interrupt list and in GISA.
 *
 * Note that for a guest that does not enable I/O interrupts
 * but relies on TPI, a flood of classic interrupts may starve
 * out adapter interrupts on the same isc. Linux does not do
 * that, and it is possible to work around the issue by configuring
 * different iscs for classic and adapter interrupts in the guest,
 * but we may want to revisit this in the future.
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_s390_interrupt_info *inti, *tmp_inti;
	int isc;

	inti = get_top_io_int(kvm, isc_mask, schid);

	isc = get_top_gisa_isc(kvm, isc_mask, schid);
	if (isc < 0)
		/* no AI in GISA */
		goto out;

	if (!inti)
		/* AI in GISA but no classical IO int */
		goto gisa_out;

	/* both types of interrupts present */
	if (int_word_to_isc(inti->io.io_int_word) <= isc) {
		/* classical IO int with higher priority */
		gisa_set_ipm_gisc(gi->origin, isc);
		goto out;
	}
gisa_out:
	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
	if (tmp_inti) {
		tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
		tmp_inti->io.io_int_word = isc_to_int_word(isc);
		if (inti)
			kvm_s390_reinject_io_int(kvm, inti);
		inti = tmp_inti;
	} else
		gisa_set_ipm_gisc(gi->origin, isc);
out:
	return inti;
}

static int __inject_service(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_service_signal++;
	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;

	/* We always allow events, track them separately from the sccb ints */
	if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING)
		set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);

	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts one after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_virtio(struct kvm *kvm,
			   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_virtio++;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_pfault_done++;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	kvm->stat.inject_float_mchk++;
	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	kvm->stat.inject_io++;
	isc = int_word_to_isc(inti->io.io_int_word);

	/*
	 * We do not use the lock checking variant as this is just a
	 * performance optimization and we do not hold the lock here.
	 * This is ok as the code will pick interrupts from both "lists"
	 * for delivery.
	 */
	if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
		gisa_set_ipm_gisc(gi->origin, isc);
		kfree(inti);
		return 0;
	}

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	if (inti->type & KVM_S390_INT_IO_AI_MASK)
		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
	else
		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
			 inti->io.subchannel_id >> 8,
			 inti->io.subchannel_id >> 1 & 0x3,
			 inti->io.subchannel_nr);

	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = kvm->arch.float_int.next_rr_cpu++;
			kvm->arch.float_int.next_rr_cpu %= online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	switch (type) {
	case KVM_S390_MCHK:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (!(type & KVM_S390_INT_IO_AI_MASK &&
		      kvm->arch.gisa_int.origin) ||
		      kvm_s390_pv_cpu_get_handle(dst_vcpu))
			kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
		break;
	default:
		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
		break;
	}
	kvm_s390_vcpu_wakeup(dst_vcpu);
}

static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;
	__floating_irq_kick(kvm, type);
	return 0;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);
	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}
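
/*
 * Convert a struct kvm_s390_interrupt (the older userspace format) into
 * a struct kvm_s390_irq.
 */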
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_PFAULT_INIT:
		irq->u.ext.ext_params = s390int->parm;
		irq->u.ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_RESTART:
	case KVM_S390_INT_CLOCK_COMP:
	case KVM_S390_INT_CPU_TIMER:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}

static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	mutex_lock(&kvm->lock);
	if (!kvm_s390_pv_is_protected(kvm))
		fi->masked_irqs = 0;
	mutex_unlock(&kvm->lock);
	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
	kvm_s390_gisa_clear(kvm);
}
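
/*
 * Collect all pending floating interrupts (GISA adapter interrupts,
 * queued interrupts, service signal and machine check) into a userspace
 * buffer; returns the number of interrupts stored or a negative error,
 * -ENOMEM if the buffer is too small for all of them.
 */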
static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	if (gi->origin && gisa_get_ipm(gi->origin)) {
		for (i = 0; i <= MAX_ISC; i++) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out_nolock;
			}
			if (gisa_tac_ipm_gisc(gi->origin, i)) {
				irq = (struct kvm_s390_irq *) &buf[n];
				irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
				irq->u.io.io_int_word = isc_to_int_word(i);
				n++;
			}
		}
	}
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) ||
	    test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
out_nolock:
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}

static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (attr->attr < sizeof(ais))
		return -EINVAL;

	if (!test_kvm_facility(kvm, 72))
		return -EOPNOTSUPP;

	mutex_lock(&fi->ais_lock);
	ais.simm = fi->simm;
	ais.nimm = fi->nimm;
	mutex_unlock(&fi->ais_lock);

	if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
		return -EFAULT;

	return 0;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_get_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL_ACCOUNT);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
		return -EINVAL;

	adapter_info.id = array_index_nospec(adapter_info.id,
					     MAX_S390_IO_ADAPTERS);

	if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL_ACCOUNT);
	if (!adapter)
		return -ENOMEM;

	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	adapter->suppressible = (adapter_info.flags) &
				KVM_S390_ADAPTER_SUPPRESSIBLE;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++)
		kfree(kvm->arch.adapters[i]);
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	/*
	 * The following operations are no longer needed and therefore no-ops.
	 * The gpa to hva translation is done when an IRQ route is set up. The
	 * set_irq code uses get_user_pages_remote() to do the actual write.
	 */
	case KVM_S390_IO_ADAPTER_MAP:
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
	u32 schid;

	if (attr->flags)
		return -EINVAL;
	if (attr->attr != sizeof(schid))
		return -EINVAL;
	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
		return -EFAULT;
	if (!schid)
		return -EINVAL;
	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
	/*
	 * If userspace is conforming to the architecture, we can have at most
	 * one pending I/O interrupt per subchannel, so this is effectively a
	 * clear all.
	 */
	return 0;
}

static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_req req;
	int ret = 0;

	if (!test_kvm_facility(kvm, 72))
		return -EOPNOTSUPP;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	if (req.isc > MAX_ISC)
		return -EINVAL;

	trace_kvm_s390_modify_ais_mode(req.isc,
				       (fi->simm & AIS_MODE_MASK(req.isc)) ?
				       (fi->nimm & AIS_MODE_MASK(req.isc)) ?
				       2 : KVM_S390_AIS_MODE_SINGLE :
				       KVM_S390_AIS_MODE_ALL, req.mode);

	mutex_lock(&fi->ais_lock);
	switch (req.mode) {
	case KVM_S390_AIS_MODE_ALL:
		fi->simm &= ~AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	case KVM_S390_AIS_MODE_SINGLE:
		fi->simm |= AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&fi->ais_lock);

	return ret;
}
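
/*
 * Inject an adapter interrupt for the given adapter, honouring the
 * adapter-interruption-suppression (AIS) mode: in single-interruption
 * mode the ISC is marked as suppressed after a successful injection and
 * further injections on that ISC are skipped until the mode is changed.
 */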
static int kvm_s390_inject_airq(struct kvm *kvm,
				struct s390_io_adapter *adapter)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_IO(1, 0, 0, 0),
		.parm = 0,
		.parm64 = isc_to_int_word(adapter->isc),
	};
	int ret = 0;

	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
		return kvm_s390_inject_vm(kvm, &s390int);

	mutex_lock(&fi->ais_lock);
	if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
		trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
		goto out;
	}

	ret = kvm_s390_inject_vm(kvm, &s390int);
	if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
		fi->nimm |= AIS_MODE_MASK(adapter->isc);
		trace_kvm_s390_modify_ais_mode(adapter->isc,
					       KVM_S390_AIS_MODE_SINGLE, 2);
	}
out:
	mutex_unlock(&fi->ais_lock);
	return ret;
}

static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	unsigned int id = attr->attr;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter)
		return -EINVAL;

	return kvm_s390_inject_airq(kvm, adapter);
}

static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (!test_kvm_facility(kvm, 72))
		return -EOPNOTSUPP;

	if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
		return -EFAULT;

	mutex_lock(&fi->ais_lock);
	fi->simm = ais.simm;
	fi->nimm = ais.nimm;
	mutex_unlock(&fi->ais_lock);

	return 0;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned long i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		r = clear_io_irq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM:
		r = modify_ais_mode(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AIRQ_INJECT:
		r = flic_inject_airq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_set_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_has_attr(struct kvm_device *dev,
			 struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
	case KVM_DEV_FLIC_ENQUEUE:
	case KVM_DEV_FLIC_CLEAR_IRQS:
	case KVM_DEV_FLIC_APF_ENABLE:
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
	case KVM_DEV_FLIC_AISM:
	case KVM_DEV_FLIC_AIRQ_INJECT:
	case KVM_DEV_FLIC_AISM_ALL:
		return 0;
	}
	return -ENXIO;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.has_attr = flic_has_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
{
	struct page *page = NULL;

	mmap_read_lock(kvm->mm);
	get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
			      &page, NULL, NULL);
	mmap_read_unlock(kvm->mm);
	return page;
}
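
/*
 * Set the adapter-local indicator bit and the summary indicator bit in
 * the guest-provided areas; returns 1 if the summary bit was newly set,
 * 0 if it was already set (the interrupt is coalesced) and -1 on error.
 */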
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct page *ind_page, *summary_page;
	void *map;

	ind_page = get_map_page(kvm, adapter_int->ind_addr);
	if (!ind_page)
		return -1;
	summary_page = get_map_page(kvm, adapter_int->summary_addr);
	if (!summary_page) {
		put_page(ind_page);
		return -1;
	}

	idx = srcu_read_lock(&kvm->srcu);
	map = page_address(ind_page);
	bit = get_ind_bit(adapter_int->ind_addr,
			  adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT);
	set_page_dirty_lock(ind_page);
	map = page_address(summary_page);
	bit = get_ind_bit(adapter_int->summary_addr,
			  adapter_int->summary_offset, adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT);
	set_page_dirty_lock(summary_page);
	srcu_read_unlock(&kvm->srcu, idx);

	put_page(ind_page);
	put_page(summary_page);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	if ((ret > 0) && !adapter->masked) {
		ret = kvm_s390_inject_airq(kvm, adapter);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

/*
 * Inject the machine check to the guest.
 */
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info)
{
	struct kvm_s390_interrupt_info inti;
	struct kvm_s390_irq irq;
	struct kvm_s390_mchk_info *mchk;
	union mci mci;
	__u64 cr14 = 0; /* upper bits are not used */
	int rc;

	mci.val = mcck_info->mcic;
	if (mci.sr)
		cr14 |= CR14_RECOVERY_SUBMASK;
	if (mci.dg)
		cr14 |= CR14_DEGRADATION_SUBMASK;
	if (mci.w)
		cr14 |= CR14_WARNING_SUBMASK;

	mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
	mchk->cr14 = cr14;
	mchk->mcic = mcck_info->mcic;
	mchk->ext_damage_code = mcck_info->ext_damage_code;
	mchk->failing_storage_address = mcck_info->failing_storage_address;
	if (mci.ck) {
		/* Inject the floating machine check */
		inti.type = KVM_S390_MCHK;
		rc = __inject_vm(vcpu->kvm, &inti);
	} else {
		/* Inject the machine check to specified vcpu */
		irq.type = KVM_S390_MCHK;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
	}
	WARN_ON_ONCE(rc);
}

int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	u64 uaddr;

	switch (ue->type) {
	/* we store the userspace addresses instead of the guest addresses */
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
		if (uaddr == -EFAULT)
			return -EFAULT;
		e->adapter.summary_addr = uaddr;
		uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
		if (uaddr == -EFAULT)
			return -EFAULT;
		e->adapter.ind_addr = uaddr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		return 0;
	default:
		return -EINVAL;
	}
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}

int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}
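
/*
 * Translate one locally pending interrupt type into the kvm_s390_irq
 * representation used by kvm_s390_get_irq_state().
 */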
static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}
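
/*
 * Wake up at most one idle VCPU that has I/O interrupts enabled for one
 * of the ISCs in deliverable_mask, remembering the kick in kicked_mask
 * so the same VCPU is not kicked again before it has run.
 */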
static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
{
	int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_vcpu *vcpu;
	u8 vcpu_isc_mask;

	for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (psw_ioint_disabled(vcpu))
			continue;
		vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
		if (deliverable_mask & vcpu_isc_mask) {
			/* lately kicked but not yet running */
			if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
				return;
			kvm_s390_vcpu_wakeup(vcpu);
			return;
		}
	}
}

static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
{
	struct kvm_s390_gisa_interrupt *gi =
		container_of(timer, struct kvm_s390_gisa_interrupt, timer);
	struct kvm *kvm =
		container_of(gi->origin, struct sie_page2, gisa)->kvm;
	u8 pending_mask;

	pending_mask = gisa_get_ipm_or_restore_iam(gi);
	if (pending_mask) {
		__airqs_kick_single_vcpu(kvm, pending_mask);
		hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
		return HRTIMER_RESTART;
	}

	return HRTIMER_NORESTART;
}

#define NULL_GISA_ADDR 0x00000000UL
#define NONE_GISA_ADDR 0x00000001UL
#define GISA_ADDR_MASK 0xfffff000UL
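
/*
 * Process the GIB alert list: atomically cut it off, walk all GISAs on
 * it and start their timers so that idle VCPUs are kicked to consume
 * the pending adapter interrupts.
 */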
static void process_gib_alert_list(void)
{
	struct kvm_s390_gisa_interrupt *gi;
	struct kvm_s390_gisa *gisa;
	struct kvm *kvm;
	u32 final, origin = 0UL;

	do {
		/*
		 * If the NONE_GISA_ADDR is still stored in the alert list
		 * origin, we will leave the outer loop. No further GISA has
		 * been added to the alert list by millicode while processing
		 * the current alert list.
		 */
		final = (origin & NONE_GISA_ADDR);
		/*
		 * Cut off the alert list and store the NONE_GISA_ADDR in the
		 * alert list origin to avoid further GAL interruptions.
		 * A new alert list can be built up by millicode in parallel
		 * for guests not in the yet cut-off alert list. When in the
		 * final loop, store the NULL_GISA_ADDR instead. This will re-
		 * enable GAL interruptions on the host again.
		 */
		origin = xchg(&gib->alert_list_origin,
			      (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
		/*
		 * Loop through the just cut-off alert list and start the
		 * gisa timers to kick idle vcpus to consume the pending
		 * interruptions asap.
		 */
		while (origin & GISA_ADDR_MASK) {
			gisa = (struct kvm_s390_gisa *)(u64)origin;
			origin = gisa->next_alert;
			gisa->next_alert = (u32)(u64)gisa;
			kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
			gi = &kvm->arch.gisa_int;
			if (hrtimer_active(&gi->timer))
				hrtimer_cancel(&gi->timer);
			hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
		}
	} while (!final);
}

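/* Clear the interruption pending mask (IPM) of the vm's GISA, if one is in use. */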
void kvm_s390_gisa_clear(struct kvm *kvm)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

	if (!gi->origin)
		return;
	gisa_clear_ipm(gi->origin);
	VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
}

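/*
 * Set up the GISA for @kvm if the AIV facility is available: hook the GISA
 * that is part of sie_page2 into the interrupt state and prepare the vcpu
 * kicker timer with a 50 usec expiry.
 */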
void kvm_s390_gisa_init(struct kvm *kvm)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

	if (!css_general_characteristics.aiv)
		return;
	gi->origin = &kvm->arch.sie_page2->gisa;
	gi->alert.mask = 0;
	spin_lock_init(&gi->alert.ref_lock);
	gi->expires = 50 * 1000; /* 50 usec */
	hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	gi->timer.function = gisa_vcpu_kicker;
	memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
	gi->origin->next_alert = (u32)(u64)gi->origin;
	VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
}

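/*
 * Late enablement of the GISA: initialize it and, with each vcpu's mutex
 * held, point the SIE block at the GISA descriptor and set ECA_AIV.
 */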
void kvm_s390_gisa_enable(struct kvm *kvm)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	u32 gisa_desc;

	if (gi->origin)
		return;
	kvm_s390_gisa_init(kvm);
	gisa_desc = kvm_s390_get_gisa_desc(kvm);
	if (!gisa_desc)
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		vcpu->arch.sie_block->gd = gisa_desc;
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
		mutex_unlock(&vcpu->mutex);
	}
}

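/*
 * Tear down the GISA: wait until it is no longer on the GIB alert list,
 * cancel the vcpu kicker timer, and drop the origin pointer.
 */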
void kvm_s390_gisa_destroy(struct kvm *kvm)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_s390_gisa *gisa = gi->origin;

	if (!gi->origin)
		return;
	if (gi->alert.mask)
		KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
			  kvm, gi->alert.mask);
	while (gisa_in_alert_list(gi->origin))
		cpu_relax();
	hrtimer_cancel(&gi->timer);
	gi->origin = NULL;
	VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
}

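/*
 * Disable GISA usage for all vcpus (clear ECA_AIV and the GISA descriptor
 * under the vcpu mutex) before destroying the GISA itself.
 */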
void kvm_s390_gisa_disable(struct kvm *kvm)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	if (!gi->origin)
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		vcpu->arch.sie_block->eca &= ~ECA_AIV;
		vcpu->arch.sie_block->gd = 0U;
		mutex_unlock(&vcpu->mutex);
		VCPU_EVENT(vcpu, 3, "AIV disabled for cpu %03u", vcpu->vcpu_id);
	}
	kvm_s390_gisa_destroy(kvm);
}

/**
 * kvm_s390_gisc_register - register a guest ISC
 *
 * @kvm:  the kernel vm to work with
 * @gisc: the guest interruption sub class to register
 *
 * The function extends the vm specific alert mask to use.
 * The effective IAM mask in the GISA is updated as well
 * in case the GISA is not part of the GIB alert list.
 * It will be updated at the latest when the IAM gets restored
 * by gisa_get_ipm_or_restore_iam().
 *
 * Returns: the nonspecific ISC (NISC) the gib alert mechanism
 *   has registered with the channel subsystem.
 *   -ENODEV in case the vm uses no GISA
 *   -ERANGE in case the guest ISC is invalid
 */
int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;

	if (!gi->origin)
		return -ENODEV;
	if (gisc > MAX_ISC)
		return -ERANGE;

	spin_lock(&gi->alert.ref_lock);
	gi->alert.ref_count[gisc]++;
	if (gi->alert.ref_count[gisc] == 1) {
		gi->alert.mask |= 0x80 >> gisc;
		gisa_set_iam(gi->origin, gi->alert.mask);
	}
	spin_unlock(&gi->alert.ref_lock);

	return gib->nisc;
}
EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);

/**
 * kvm_s390_gisc_unregister - unregister a guest ISC
 *
 * @kvm:  the kernel vm to work with
 * @gisc: the guest interruption sub class to unregister
 *
 * The function reduces the vm specific alert mask to use.
 * The effective IAM mask in the GISA is updated as well
 * in case the GISA is not part of the GIB alert list.
 * It will be updated at the latest when the IAM gets restored
 * by gisa_get_ipm_or_restore_iam().
 *
 * Returns: 0 in case of success.
 *   -ENODEV in case the vm uses no GISA
 *   -ERANGE in case the guest ISC is invalid
 *   -EINVAL in case the guest ISC is not registered
 */
int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
{
	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
	int rc = 0;

	if (!gi->origin)
		return -ENODEV;
	if (gisc > MAX_ISC)
		return -ERANGE;

	spin_lock(&gi->alert.ref_lock);
	if (gi->alert.ref_count[gisc] == 0) {
		rc = -EINVAL;
		goto out;
	}
	gi->alert.ref_count[gisc]--;
	if (gi->alert.ref_count[gisc] == 0) {
		gi->alert.mask &= ~(0x80 >> gisc);
		gisa_set_iam(gi->origin, gi->alert.mask);
	}
out:
	spin_unlock(&gi->alert.ref_lock);

	return rc;
}
EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);

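/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * wants adapter interrupts for a guest ISC delivered through the GIB alert
 * mechanism would pair the two exports roughly as follows. The variable
 * names and error handling below are assumptions of the example, not code
 * taken from any in-tree user.
 *
 *	int nisc;
 *
 *	nisc = kvm_s390_gisc_register(kvm, gisc);
 *	if (nisc < 0)
 *		return nisc;	 (no GISA in use, or @gisc out of range)
 *
 *	route the adapter interruption via the returned host NISC, then on
 *	teardown drop the reference again:
 *
 *	if (kvm_s390_gisc_unregister(kvm, gisc))
 *		pr_warn("gisc %u was not registered\n", gisc);
 */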
static void aen_host_forward(unsigned long si)
{
	struct kvm_s390_gisa_interrupt *gi;
	struct zpci_gaite *gaite;
	struct kvm *kvm;

	gaite = (struct zpci_gaite *)aift->gait +
		(si * sizeof(struct zpci_gaite));
	if (gaite->count == 0)
		return;
	if (gaite->aisb != 0)
		set_bit_inv(gaite->aisbo, phys_to_virt(gaite->aisb));

	kvm = kvm_s390_pci_si_to_kvm(aift, si);
	if (!kvm)
		return;
	gi = &kvm->arch.gisa_int;

	if (!(gi->origin->g1.simm & AIS_MODE_MASK(gaite->gisc)) ||
	    !(gi->origin->g1.nimm & AIS_MODE_MASK(gaite->gisc))) {
		gisa_set_ipm_gisc(gi->origin, gaite->gisc);
		if (hrtimer_active(&gi->timer))
			hrtimer_cancel(&gi->timer);
		hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
		kvm->stat.aen_forward++;
	}
}

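/*
 * Scan the adapter summary indicator bit vector under the gait_lock and
 * forward each pending adapter event to its guest via aen_host_forward().
 * The vector is re-scanned after interrupts for @isc have been re-enabled
 * to close the race with bits set in the meantime.
 */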
static void aen_process_gait(u8 isc)
{
	bool found = false, first = true;
	union zpci_sic_iib iib = {{0}};
	unsigned long si, flags;

	spin_lock_irqsave(&aift->gait_lock, flags);

	if (!aift->gait) {
		spin_unlock_irqrestore(&aift->gait_lock, flags);
		return;
	}

	for (si = 0;;) {
		/* Scan adapter summary indicator bit vector */
		si = airq_iv_scan(aift->sbv, si, airq_iv_end(aift->sbv));
		if (si == -1UL) {
			if (first || found) {
				/* Re-enable interrupts. */
				zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, isc,
						  &iib);
				first = found = false;
			} else {
				/* Interrupts on and all bits processed */
				break;
			}
			found = false;
			si = 0;
			/* Scan again after re-enabling interrupts */
			continue;
		}
		found = true;
		aen_host_forward(si);
	}

	spin_unlock_irqrestore(&aift->gait_lock, flags);
}

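/*
 * GIB alert interruption handler: account the interrupt and, depending on
 * whether adapter event forwarding is compiled in and signalled in the
 * adapter info, process the adapter event table and/or the GIB alert list.
 */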
static void gib_alert_irq_handler(struct airq_struct *airq,
				  struct tpi_info *tpi_info)
{
	struct tpi_adapter_info *info = (struct tpi_adapter_info *)tpi_info;

	inc_irq_stat(IRQIO_GAL);

	if ((info->forward || info->error) &&
	    IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		aen_process_gait(info->isc);
		if (info->aism != 0)
			process_gib_alert_list();
	} else {
		process_gib_alert_list();
	}
}

static struct airq_struct gib_alert_irq = {
	.handler = gib_alert_irq_handler,
	.lsi_ptr = &gib_alert_irq.lsi_mask,
};

void kvm_s390_gib_destroy(void)
{
	if (!gib)
		return;
	if (kvm_s390_pci_interp_allowed() && aift) {
		mutex_lock(&aift->aift_lock);
		kvm_s390_pci_aen_exit();
		mutex_unlock(&aift->aift_lock);
	}
	chsc_sgib(0);
	unregister_adapter_interrupt(&gib_alert_irq);
	free_page((unsigned long)gib);
	gib = NULL;
}

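/*
 * Allocate the global GIB, register the alert interruption handler for
 * @nisc, and associate the GIB with the AIV facility via chsc_sgib().
 * Returns 0 on success or a negative error code; partially set up state
 * is rolled back on failure.
 */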
int kvm_s390_gib_init(u8 nisc)
{
	int rc = 0;

	if (!css_general_characteristics.aiv) {
		KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
		goto out;
	}

	gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
	if (!gib) {
		rc = -ENOMEM;
		goto out;
	}

	gib_alert_irq.isc = nisc;
	if (register_adapter_interrupt(&gib_alert_irq)) {
		pr_err("Registering the GIB alert interruption handler failed\n");
		rc = -EIO;
		goto out_free_gib;
	}

	gib->nisc = nisc;
	if (chsc_sgib((u32)(u64)gib)) {
		pr_err("Associating the GIB with the AIV facility failed\n");
		free_page((unsigned long)gib);
		gib = NULL;
		rc = -EIO;
		goto out_unreg_gal;
	}

	if (kvm_s390_pci_interp_allowed()) {
		if (kvm_s390_pci_aen_init(nisc)) {
			pr_err("Initializing AEN for PCI failed\n");
			rc = -EIO;
			goto out_unreg_gal;
		}
	}

	KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
	goto out;

out_unreg_gal:
	unregister_adapter_interrupt(&gib_alert_irq);
out_free_gib:
	free_page((unsigned long)gib);
	gib = NULL;
out:
	return rc;
}