emulate.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "trace.h"

/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should only be called when the guest is in a branch delay
 * slot (i.e. CAUSEF_BD is set).
 */
static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
				  unsigned long *out)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc;
	int err;

	if (epc & 3) {
		kvm_err("%s: unaligned epc\n", __func__);
		return -EINVAL;
	}

	/* Read the instruction */
	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
	if (err)
		return err;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			fallthrough;
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		default:
			return -EINVAL;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bposge32_op:
			if (!cpu_has_dsp) {
				kvm_err("%s: DSP branch but not DSP ASE\n",
					__func__);
				return -EINVAL;
			}

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		default:
			return -EINVAL;
		}
		break;

		/* These are unconditional and in j_format. */
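		/*
		 * Illustrative sketch (hypothetical values): for a jump at
		 * epc = 0x80001234 with a 26-bit target field of 0x012345,
		 * the code below keeps the top 4 bits of the delay slot
		 * address (epc + 4 = 0x80001238, region 0x80000000) and ORs
		 * in target << 2 = 0x48d14, giving a next PC of 0x80048d14.
		 */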
	case jal_op:
		arch->gprs[31] = instpc + 8;
		fallthrough;
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:	/* POP06 */
#ifndef CONFIG_CPU_MIPSR6
	case blezl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:	/* POP07 */
#ifndef CONFIG_CPU_MIPSR6
	case bgtzl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		return -EINVAL;

#ifdef CONFIG_CPU_MIPSR6
	/* R6 added the following compact branches with forbidden slots */
	case blezl_op:	/* POP26 */
	case bgtzl_op:	/* POP27 */
		/* only rt == 0 isn't compact branch */
		if (insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop10_op:
	case pop30_op:
		/* only rs == rt == 0 is reserved, rest are compact branches */
		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop66_op:
	case pop76_op:
		/* only rs == 0 isn't compact branch */
		if (insn.i_format.rs != 0)
			goto compact_branch;
		return -EINVAL;
compact_branch:
		/*
		 * If we've hit an exception on the forbidden slot, then
		 * the branch must not have been taken.
		 */
		epc += 8;
		nextpc = epc;
		break;
#else
compact_branch:
		/* Fall through - Compact branches not supported before R6 */
#endif
	default:
		return -EINVAL;
	}

	*out = nextpc;
	return 0;
}

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	int err;

	if (cause & CAUSEF_BD) {
		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
					     &vcpu->arch.pc);
		if (err)
			return EMULATE_FAIL;
	} else {
		vcpu->arch.pc += 4;
	}

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return EMULATE_DONE;
}

/**
 * kvm_get_badinstr() - Get bad instruction encoding.
 * @opc:	Guest pointer to faulting instruction.
 * @vcpu:	KVM VCPU information.
 * @out:	Output for the instruction encoding.
 *
 * Gets the instruction encoding of the faulting instruction from the saved
 * BadInstr register value. Reading guest memory at @opc is not used as a
 * fallback, so a CPU without the BadInstr register is treated as an error.
 *
 * Returns:	0 on success, with the encoding written to @out.
 *		-EINVAL if the CPU has no BadInstr register.
 */
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstr) {
		*out = vcpu->arch.host_cp0_badinstr;
		return 0;
	} else {
		WARN_ONCE(1, "CPU doesn't have BadInstr register\n");
		return -EINVAL;
	}
}

/**
 * kvm_get_badinstrp() - Get bad prior instruction encoding.
 * @opc:	Guest pointer to prior faulting instruction.
 * @vcpu:	KVM VCPU information.
 * @out:	Output for the instruction encoding.
 *
 * Gets the instruction encoding of the prior faulting instruction (the branch
 * containing the delay slot which faulted) from the saved BadInstrP register
 * value. Reading guest memory at @opc is not used as a fallback, so a CPU
 * without the BadInstrP register is treated as an error.
 *
 * Returns:	0 on success, with the encoding written to @out.
 *		-EINVAL if the CPU has no BadInstrP register.
 */
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstrp) {
		*out = vcpu->arch.host_cp0_badinstrp;
		return 0;
	} else {
		WARN_ONCE(1, "CPU doesn't have BadInstrp register\n");
		return -EINVAL;
	}
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;

	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel monotonic time to convert.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime @now scaled to a 32-bit count at count_hz.
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta*count_hz will never overflow since
	 * at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
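	/*
	 * Worked example with a hypothetical count_hz of 100 MHz:
	 *   count_period = 10^9 * 2^32 / 10^8 ns, roughly 42.9 seconds, so
	 *   delta * count_hz stays below 2^32 * 10^9 (about 4.3e18), which
	 *   fits comfortably in the 64-bit intermediate, and the quotient
	 *   delta * count_hz / NSEC_PER_SEC stays below 2^32.
	 */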
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles if the
 * timer interrupt is pending and hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	ktime_t expires, threshold;
	u32 count, compare;
	int running;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);

	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it.
	 */
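	/*
	 * The comparison is modular, e.g. (hypothetical values) with
	 * count = 0xfffffff0 and compare = 0x00000010 the difference is
	 * (s32)0xffffffe0 = -32, so the interrupt point has not yet been
	 * reached even though count is numerically larger than compare.
	 */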
	if ((s32)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	return count;
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the timer
 * is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
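 *
 * Typical usage (a sketch of the pattern followed by kvm_mips_set_count_hz()
 * and kvm_mips_write_compare() below):
 *
 *	now = kvm_mips_freeze_hrtimer(vcpu, &count);
 *	... update timer parameters ...
 *	kvm_mips_resume_hrtimer(vcpu, now, count);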
 */
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_timer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_timer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, u32 count)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	u32 compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
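	/*
	 * Example of the wrap behaviour (hypothetical values): if
	 * compare == count the timer should fire a full period from now, and
	 * indeed (u64)(u32)(compare - count - 1) + 1 = 0xffffffff + 1 = 2^32
	 * counts; if compare == count + 1 it evaluates to exactly 1 count.
	 */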
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(u32)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

/**
 * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
 * @vcpu:	Virtual CPU.
 * @before:	Time before Count was saved, lower bound of drift calculation.
 * @count:	CP0_Count at point of restore.
 * @min_drift:	Minimum amount of drift permitted before correction.
 *		Must be <= 0.
 *
 * Restores the timer from a particular @count, accounting for drift. This can
 * be used in conjunction with kvm_mips_freeze_timer() when a hardware timer is
 * to be used for a period of time, but the exact ktime corresponding to the
 * final Count that must be restored is not known.
 *
 * It is guaranteed that a timer interrupt immediately after restore will be
 * handled, but not if CP0_Compare is exactly at @count. That case should
 * already be handled when the hardware timer state is saved.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
 * stopped).
 *
 * Returns:	Amount of correction to count_bias due to drift.
 */
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift)
{
	ktime_t now, count_time;
	u32 now_count, before_count;
	u64 delta;
	int drift, ret = 0;

	/* Calculate expected count at before */
	before_count = vcpu->arch.count_bias +
			kvm_mips_ktime_to_count(vcpu, before);

	/*
	 * Detect significantly negative drift, where count is lower than
	 * expected. Some negative drift is expected when hardware counter is
	 * set after kvm_mips_freeze_timer(), and it is harmless to allow the
	 * time to jump forwards a little, within reason. If the drift is too
	 * significant, adjust the bias to avoid a big Guest.CP0_Count jump.
	 */
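	/*
	 * For instance (hypothetical values), with min_drift = -16 and an
	 * expected before_count of 1000: a saved count of 996 (drift -4) is
	 * tolerated and falls through to the checks below, while a saved
	 * count of 900 (drift -100) adjusts count_bias by -100 and resumes
	 * from @before.
	 */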
	drift = count - before_count;
	if (drift < min_drift) {
		count_time = before;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Calculate expected count right now */
	now = ktime_get();
	now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);

	/*
	 * Detect positive drift, where count is higher than expected, and
	 * adjust the bias to avoid guest time going backwards.
	 */
	drift = count - now_count;
	if (drift > 0) {
		count_time = now;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Subtract nanosecond delta to find ktime when count was read */
	delta = (u64)(u32)(now_count - count);
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	count_time = ktime_sub_ns(now, delta);

resume:
	/* Resume using the calculated ktime */
	kvm_mips_resume_hrtimer(vcpu, count_time, count);
	return ret;
}

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of timer.
 *
 * Initialise the timer to the specified frequency, zero it, and set it going if
 * it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
{
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at 0 */
	kvm_mips_write_count(vcpu, 0);
}

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	int dc;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);
	s32 delta = compare - old_compare;
	u32 cause;
	ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
	u32 count;

	/* if unchanged, must just be an ack */
	if (old_compare == compare) {
		if (!ack)
			return;
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
		kvm_write_c0_guest_compare(cop0, compare);
		return;
	}

	/*
	 * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
	 * too to prevent guest CP0_Count hitting guest CP0_Compare.
	 *
	 * The new GTOffset corresponds to the new value of CP0_Compare, and is
	 * set prior to it being written into the guest context. We disable
	 * preemption until the new value is written to prevent restore of a
	 * GTOffset corresponding to the old CP0_Compare value.
	 */
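	/*
	 * Informal note: under the VZ ASE the guest reads CP0_Count as
	 * Root.CP0_Count + CP0_GTOffset, which is why the new offset is
	 * computed below as compare - read_c0_count().
	 */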
	if (delta > 0) {
		preempt_disable();
		write_c0_gtoffset(compare - read_c0_count());
		back_to_back_c0_hazard();
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
	else
		/*
		 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
		 * preserve guest CP0_Cause.TI if we don't want to ack it.
		 */
		cause = kvm_read_c0_guest_cause(cop0);

	kvm_write_c0_guest_compare(cop0, compare);

	if (delta > 0)
		preempt_enable();

	back_to_back_c0_hazard();

	if (!ack && cause & CAUSEF_TI)
		kvm_write_c0_guest_cause(cop0, cause);

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);

	/*
	 * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
	 * until after the new CP0_Compare is written, otherwise new guest
	 * CP0_Count could hit new guest CP0_Compare.
	 */
	if (delta <= 0)
		write_c0_gtoffset(compare - read_c0_count());
}

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	u32 count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	u32 count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	u32 count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(u32)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
	if (!vcpu->arch.pending_exceptions) {
		kvm_vz_lose_htimer(vcpu);
		vcpu->arch.wait = 1;
		kvm_vcpu_halt(vcpu);

		/*
		 * If we are runnable, go off to user space to check whether
		 * any I/O interrupts are pending.
		 */
		if (kvm_arch_vcpu_runnable(vcpu))
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
	}

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu)
{
	int r;
	enum emulation_result er;
	u32 rt;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;
	unsigned int imme;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = inst.i_format.rt;

	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
						vcpu->arch.host_cp0_badvaddr);
	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
		goto out_fail;

	switch (inst.i_format.opcode) {
#if defined(CONFIG_64BIT)
	case sd_op:
		run->mmio.len = 8;
		*(u64 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u64 *)data);
		break;
#endif

	case sw_op:
		run->mmio.len = 4;
		*(u32 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *)data);
		break;

	case sh_op:
		run->mmio.len = 2;
		*(u16 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u16 *)data);
		break;

	case sb_op:
		run->mmio.len = 1;
		*(u8 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u8 *)data);
		break;
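	/*
	 * For the partial-word stores below, the aligned word (or doubleword)
	 * in run->mmio.data is built by merging the relevant byte lanes of
	 * GPR rt with whatever the other lanes already hold. As an
	 * illustrative trace of the offset-1 SWL case: bits 31:16 of the
	 * buffer word are kept and bits 15:0 are taken from bits 31:16 of rt.
	 */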
	case swl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);
		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			*(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
					(vcpu->arch.gprs[rt] >> 24);
			break;
		case 1:
			*(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
					(vcpu->arch.gprs[rt] >> 16);
			break;
		case 2:
			*(u32 *)data = ((*(u32 *)data) & 0xff000000) |
					(vcpu->arch.gprs[rt] >> 8);
			break;
		case 3:
			*(u32 *)data = vcpu->arch.gprs[rt];
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *)data);
		break;

	case swr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);
		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			*(u32 *)data = vcpu->arch.gprs[rt];
			break;
		case 1:
			*(u32 *)data = ((*(u32 *)data) & 0xff) |
					(vcpu->arch.gprs[rt] << 8);
			break;
		case 2:
			*(u32 *)data = ((*(u32 *)data) & 0xffff) |
					(vcpu->arch.gprs[rt] << 16);
			break;
		case 3:
			*(u32 *)data = ((*(u32 *)data) & 0xffffff) |
					(vcpu->arch.gprs[rt] << 24);
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *)data);
		break;

#if defined(CONFIG_64BIT)
	case sdl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
					((vcpu->arch.gprs[rt] >> 56) & 0xff);
			break;
		case 1:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
					((vcpu->arch.gprs[rt] >> 48) & 0xffff);
			break;
		case 2:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
					((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
			break;
		case 3:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
					((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
			break;
		case 4:
			*(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
					((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
			break;
		case 5:
			*(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
					((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
			break;
		case 6:
			*(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
					((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
			break;
		case 7:
			*(u64 *)data = vcpu->arch.gprs[rt];
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u64 *)data);
		break;

	case sdr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			*(u64 *)data = vcpu->arch.gprs[rt];
			break;
		case 1:
			*(u64 *)data = ((*(u64 *)data) & 0xff) |
					(vcpu->arch.gprs[rt] << 8);
			break;
		case 2:
			*(u64 *)data = ((*(u64 *)data) & 0xffff) |
					(vcpu->arch.gprs[rt] << 16);
			break;
		case 3:
			*(u64 *)data = ((*(u64 *)data) & 0xffffff) |
					(vcpu->arch.gprs[rt] << 24);
			break;
		case 4:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
					(vcpu->arch.gprs[rt] << 32);
			break;
		case 5:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
					(vcpu->arch.gprs[rt] << 40);
			break;
		case 6:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
					(vcpu->arch.gprs[rt] << 48);
			break;
		case 7:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
					(vcpu->arch.gprs[rt] << 56);
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u64 *)data);
		break;
#endif

#ifdef CONFIG_CPU_LOONGSON64
	case sdc2_op:
		rt = inst.loongson3_lsdc2_format.rt;
		switch (inst.loongson3_lsdc2_format.opcode1) {
		/*
		 * Loongson-3 overridden sdc2 instructions.
		 * opcode1	instruction
		 *   0x0	gssbx: store 1 byte from GPR
		 *   0x1	gsshx: store 2 bytes from GPR
		 *   0x2	gsswx: store 4 bytes from GPR
		 *   0x3	gssdx: store 8 bytes from GPR
		 */
		case 0x0:
			run->mmio.len = 1;
			*(u8 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u8 *)data);
			break;
		case 0x1:
			run->mmio.len = 2;
			*(u16 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u16 *)data);
			break;
		case 0x2:
			run->mmio.len = 4;
			*(u32 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u32 *)data);
			break;
		case 0x3:
			run->mmio.len = 8;
			*(u64 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u64 *)data);
			break;
		default:
			kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
				inst.word);
			break;
		}
		break;
#endif
	default:
		kvm_err("Store not yet supported (inst=0x%08x)\n",
			inst.word);
		goto out_fail;
	}

	vcpu->mmio_needed = 1;
	run->mmio.is_write = 1;
	vcpu->mmio_is_write = 1;

	r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			run->mmio.phys_addr, run->mmio.len, data);

	if (!r) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;

out_fail:
	/* Rollback PC if emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	return EMULATE_FAIL;
}

enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause, struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r;
	enum emulation_result er;
	unsigned long curr_pc;
	u32 op, rt;
	unsigned int imme;

	rt = inst.i_format.rt;
	op = inst.i_format.opcode;

	/*
	 * Find the resume PC now while we have safe and easy access to the
	 * prior branch instruction, and save it for
	 * kvm_mips_complete_mmio_load() to restore later.
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;
	vcpu->arch.io_pc = vcpu->arch.pc;
	vcpu->arch.pc = curr_pc;

	vcpu->arch.io_gpr = rt;

	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
						vcpu->arch.host_cp0_badvaddr);
	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
		return EMULATE_FAIL;

	vcpu->mmio_needed = 2;	/* signed */
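	/*
	 * Summary of the vcpu->mmio_needed encoding used below and decoded by
	 * kvm_mips_complete_mmio_load(): 1 = zero-extended load, 2 =
	 * sign-extended load, 3-10 = lwl/lwr partial-word cases, 11-26 =
	 * ldl/ldr partial-doubleword cases, 27-30 = Loongson
	 * gslbx/gslhx/gslwx/gsldx.
	 */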
	switch (op) {
#if defined(CONFIG_64BIT)
	case ld_op:
		run->mmio.len = 8;
		break;

	case lwu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
#endif
	case lw_op:
		run->mmio.len = 4;
		break;

	case lhu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
	case lh_op:
		run->mmio.len = 2;
		break;

	case lbu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
	case lb_op:
		run->mmio.len = 1;
		break;

	case lwl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);

		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 3;	/* 1 byte */
			break;
		case 1:
			vcpu->mmio_needed = 4;	/* 2 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 5;	/* 3 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 6;	/* 4 bytes */
			break;
		default:
			break;
		}
		break;

	case lwr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);

		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 7;	/* 4 bytes */
			break;
		case 1:
			vcpu->mmio_needed = 8;	/* 3 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 9;	/* 2 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 10;	/* 1 byte */
			break;
		default:
			break;
		}
		break;

#if defined(CONFIG_64BIT)
	case ldl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 11;	/* 1 byte */
			break;
		case 1:
			vcpu->mmio_needed = 12;	/* 2 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 13;	/* 3 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 14;	/* 4 bytes */
			break;
		case 4:
			vcpu->mmio_needed = 15;	/* 5 bytes */
			break;
		case 5:
			vcpu->mmio_needed = 16;	/* 6 bytes */
			break;
		case 6:
			vcpu->mmio_needed = 17;	/* 7 bytes */
			break;
		case 7:
			vcpu->mmio_needed = 18;	/* 8 bytes */
			break;
		default:
			break;
		}
		break;

	case ldr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 19;	/* 8 bytes */
			break;
		case 1:
			vcpu->mmio_needed = 20;	/* 7 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 21;	/* 6 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 22;	/* 5 bytes */
			break;
		case 4:
			vcpu->mmio_needed = 23;	/* 4 bytes */
			break;
		case 5:
			vcpu->mmio_needed = 24;	/* 3 bytes */
			break;
		case 6:
			vcpu->mmio_needed = 25;	/* 2 bytes */
			break;
		case 7:
			vcpu->mmio_needed = 26;	/* 1 byte */
			break;
		default:
			break;
		}
		break;
#endif

#ifdef CONFIG_CPU_LOONGSON64
	case ldc2_op:
		rt = inst.loongson3_lsdc2_format.rt;
		switch (inst.loongson3_lsdc2_format.opcode1) {
		/*
		 * Loongson-3 overridden ldc2 instructions.
		 * opcode1	instruction
		 *   0x0	gslbx: load 1 byte into GPR
		 *   0x1	gslhx: load 2 bytes into GPR
		 *   0x2	gslwx: load 4 bytes into GPR
		 *   0x3	gsldx: load 8 bytes into GPR
		 */
		case 0x0:
			run->mmio.len = 1;
			vcpu->mmio_needed = 27;	/* signed */
			break;
		case 0x1:
			run->mmio.len = 2;
			vcpu->mmio_needed = 28;	/* signed */
			break;
		case 0x2:
			run->mmio.len = 4;
			vcpu->mmio_needed = 29;	/* signed */
			break;
		case 0x3:
			run->mmio.len = 8;
			vcpu->mmio_needed = 30;	/* signed */
			break;
		default:
			kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
				inst.word);
			break;
		}
		break;
#endif

	default:
		kvm_err("Load not yet supported (inst=0x%08x)\n",
			inst.word);
		vcpu->mmio_needed = 0;
		return EMULATE_FAIL;
	}

	run->mmio.is_write = 0;
	vcpu->mmio_is_write = 0;

	r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
			run->mmio.phys_addr, run->mmio.len, run->mmio.data);

	if (!r) {
		kvm_mips_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/* Restore saved resume PC */
	vcpu->arch.pc = vcpu->arch.io_pc;
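	/*
	 * The switch below reverses the vcpu->mmio_needed encoding set up by
	 * kvm_mips_emulate_load(): full-width loads simply overwrite the
	 * destination GPR, while the lwl/lwr/ldl/ldr cases merge only the
	 * loaded byte lanes with the register's existing contents.
	 */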
	switch (run->mmio.len) {
	case 8:
		switch (vcpu->mmio_needed) {
		case 11:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xff) << 56);
			break;
		case 12:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffff) << 48);
			break;
		case 13:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffff) << 40);
			break;
		case 14:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
			break;
		case 15:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
			break;
		case 16:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
			break;
		case 17:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
			break;
		case 18:
		case 19:
			*gpr = *(s64 *)run->mmio.data;
			break;
		case 20:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
				((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
			break;
		case 21:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
				((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
			break;
		case 22:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
				((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
			break;
		case 23:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
				((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
			break;
		case 24:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
				((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
			break;
		case 25:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
				((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
			break;
		case 26:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
				((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
			break;
		default:
			*gpr = *(s64 *)run->mmio.data;
		}
		break;

	case 4:
		switch (vcpu->mmio_needed) {
		case 1:
			*gpr = *(u32 *)run->mmio.data;
			break;
		case 2:
			*gpr = *(s32 *)run->mmio.data;
			break;
		case 3:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
				(((*(s32 *)run->mmio.data) & 0xff) << 24);
			break;
		case 4:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
				(((*(s32 *)run->mmio.data) & 0xffff) << 16);
			break;
		case 5:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
				(((*(s32 *)run->mmio.data) & 0xffffff) << 8);
			break;
		case 6:
		case 7:
			*gpr = *(s32 *)run->mmio.data;
			break;
		case 8:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
				((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
			break;
		case 9:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
				((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
			break;
		case 10:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
				((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
			break;
		default:
			*gpr = *(s32 *)run->mmio.data;
		}
		break;

	case 2:
		if (vcpu->mmio_needed == 1)
			*gpr = *(u16 *)run->mmio.data;
		else
			*gpr = *(s16 *)run->mmio.data;

		break;
	case 1:
		if (vcpu->mmio_needed == 1)
			*gpr = *(u8 *)run->mmio.data;
		else
			*gpr = *(s8 *)run->mmio.data;

		break;
	}

done:
	return er;
}