insn.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2013 Huawei Ltd.
  4. * Author: Jiang Liu <[email protected]>
  5. *
  6. * Copyright (C) 2014-2016 Zi Shen Lim <[email protected]>
  7. */
  8. #include <linux/bitops.h>
  9. #include <linux/bug.h>
  10. #include <linux/printk.h>
  11. #include <linux/sizes.h>
  12. #include <linux/types.h>
  13. #include <asm/debug-monitors.h>
  14. #include <asm/errno.h>
  15. #include <asm/insn.h>
  16. #include <asm/kprobes.h>
  17. #define AARCH64_INSN_SF_BIT BIT(31)
  18. #define AARCH64_INSN_N_BIT BIT(22)
  19. #define AARCH64_INSN_LSL_12 BIT(22)
/*
 * Top-level A64 encoding-class decode table, indexed by instruction
 * bits [28:25] ("op0" in the Arm ARM A64 encoding chapter).
 */
static const int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0000 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0001 */
	AARCH64_INSN_CLS_SVE,		/* 0b0010 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0011 */
	AARCH64_INSN_CLS_LDST,		/* 0b0100 */
	AARCH64_INSN_CLS_DP_REG,	/* 0b0101 */
	AARCH64_INSN_CLS_LDST,		/* 0b0110 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0b0111 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0b1000 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0b1001 */
	AARCH64_INSN_CLS_BR_SYS,	/* 0b1010 */
	AARCH64_INSN_CLS_BR_SYS,	/* 0b1011 */
	AARCH64_INSN_CLS_LDST,		/* 0b1100 */
	AARCH64_INSN_CLS_DP_REG,	/* 0b1101 */
	AARCH64_INSN_CLS_LDST,		/* 0b1110 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0b1111 */
};
  38. enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
  39. {
  40. return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
  41. }
/*
 * Report whether a HINT-space instruction is safe to single-step out of
 * line (e.g. by kprobes). Only hints known to behave as plain NOPs when
 * executed at a different address are accepted; anything else defaults
 * to "not steppable".
 */
bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	/* 0xFE0 masks the CRm:op2 fields that select the specific hint. */
	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_XPACLRI:
	case AARCH64_INSN_HINT_PACIA_1716:
	case AARCH64_INSN_HINT_PACIB_1716:
	case AARCH64_INSN_HINT_PACIAZ:
	case AARCH64_INSN_HINT_PACIASP:
	case AARCH64_INSN_HINT_PACIBZ:
	case AARCH64_INSN_HINT_PACIBSP:
	case AARCH64_INSN_HINT_BTI:
	case AARCH64_INSN_HINT_BTIC:
	case AARCH64_INSN_HINT_BTIJ:
	case AARCH64_INSN_HINT_BTIJC:
	case AARCH64_INSN_HINT_NOP:
		return true;
	default:
		return false;
	}
}
  64. bool aarch64_insn_is_branch_imm(u32 insn)
  65. {
  66. return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
  67. aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
  68. aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
  69. aarch64_insn_is_bcond(insn));
  70. }
  71. bool __kprobes aarch64_insn_uses_literal(u32 insn)
  72. {
  73. /* ldr/ldrsw (literal), prfm */
  74. return aarch64_insn_is_ldr_lit(insn) ||
  75. aarch64_insn_is_ldrsw_lit(insn) ||
  76. aarch64_insn_is_adr_adrp(insn) ||
  77. aarch64_insn_is_prfm_lit(insn);
  78. }
  79. bool __kprobes aarch64_insn_is_branch(u32 insn)
  80. {
  81. /* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
  82. return aarch64_insn_is_b(insn) ||
  83. aarch64_insn_is_bl(insn) ||
  84. aarch64_insn_is_cbz(insn) ||
  85. aarch64_insn_is_cbnz(insn) ||
  86. aarch64_insn_is_tbz(insn) ||
  87. aarch64_insn_is_tbnz(insn) ||
  88. aarch64_insn_is_ret(insn) ||
  89. aarch64_insn_is_ret_auth(insn) ||
  90. aarch64_insn_is_br(insn) ||
  91. aarch64_insn_is_br_auth(insn) ||
  92. aarch64_insn_is_blr(insn) ||
  93. aarch64_insn_is_blr_auth(insn) ||
  94. aarch64_insn_is_bcond(insn);
  95. }
/*
 * Look up the bit position (*shiftp) and width mask (*maskp) of the
 * immediate field selected by @type within an A64 instruction word.
 * Returns 0 on success, -EINVAL for an unknown immediate type.
 */
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:	/* imms shares the 6-bit field at bit 10 */
	mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
  150. #define ADR_IMM_HILOSPLIT 2
  151. #define ADR_IMM_SIZE SZ_2M
  152. #define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1)
  153. #define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
  154. #define ADR_IMM_LOSHIFT 29
  155. #define ADR_IMM_HISHIFT 5
/*
 * Extract the immediate of type @type from @insn.
 *
 * ADR/ADRP is special-cased: its 21-bit immediate is split into immlo
 * (2 bits at [30:29]) and immhi (19 bits at [23:5]), which must be
 * re-joined before masking. All other types are a contiguous field
 * described by aarch64_get_imm_shift_mask(). Unknown types return 0.
 */
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		/* Reassemble the split immediate: immhi:immlo. */
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
/*
 * Return @insn with its immediate field of type @type replaced by @imm.
 *
 * ADR/ADRP splits the immediate into immlo/immhi fields (see the
 * decoder above); all other types occupy one contiguous field. Errors
 * (unknown type, or an already-poisoned instruction) propagate as
 * AARCH64_BREAK_FAULT so encoding chains fail safely.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		/* Scatter imm into the immlo [30:29] and immhi [23:5] fields. */
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
  206. u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
  207. u32 insn)
  208. {
  209. int shift;
  210. switch (type) {
  211. case AARCH64_INSN_REGTYPE_RT:
  212. case AARCH64_INSN_REGTYPE_RD:
  213. shift = 0;
  214. break;
  215. case AARCH64_INSN_REGTYPE_RN:
  216. shift = 5;
  217. break;
  218. case AARCH64_INSN_REGTYPE_RT2:
  219. case AARCH64_INSN_REGTYPE_RA:
  220. shift = 10;
  221. break;
  222. case AARCH64_INSN_REGTYPE_RM:
  223. shift = 16;
  224. break;
  225. default:
  226. pr_err("%s: unknown register type encoding %d\n", __func__,
  227. type);
  228. return 0;
  229. }
  230. return (insn >> shift) & GENMASK(4, 0);
  231. }
/*
 * Return @insn with the 5-bit register field of type @type set to @reg.
 *
 * Rejects register numbers outside X0..SP and unknown field types, and
 * propagates an already-poisoned instruction; all failures return
 * AARCH64_BREAK_FAULT so encoding chains fail safely.
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
/*
 * Map an access-size enum to the load/store "size" field value
 * (log2 of the access width in bytes, instruction bits [31:30]).
 */
static const u32 aarch64_insn_ldst_size[] = {
	[AARCH64_INSN_SIZE_8] = 0,
	[AARCH64_INSN_SIZE_16] = 1,
	[AARCH64_INSN_SIZE_32] = 2,
	[AARCH64_INSN_SIZE_64] = 3,
};
/*
 * Return @insn with the load/store size field (bits [31:30]) set for
 * access size @type, or AARCH64_BREAK_FAULT for an invalid size.
 */
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) {
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	size = aarch64_insn_ldst_size[type];
	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
  287. static inline long label_imm_common(unsigned long pc, unsigned long addr,
  288. long range)
  289. {
  290. long offset;
  291. if ((pc & 0x3) || (addr & 0x3)) {
  292. pr_err("%s: A64 instructions must be word aligned\n", __func__);
  293. return range;
  294. }
  295. offset = ((long)addr - (long)pc);
  296. if (offset < -range || offset >= range) {
  297. pr_err("%s: offset out of range\n", __func__);
  298. return range;
  299. }
  300. return offset;
  301. }
/*
 * Generate a B or BL instruction at @pc targeting @addr.
 * Returns AARCH64_BREAK_FAULT if the offset is unencodable or the
 * branch type is unknown.
 */
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = label_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* imm26 is the word (4-byte) offset, hence >> 2. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
/*
 * Generate a CBZ/CBNZ instruction at @pc testing @reg and branching to
 * @addr. @variant selects the 32- or 64-bit register form. Returns
 * AARCH64_BREAK_FAULT on any invalid input.
 */
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/* CBZ/CBNZ take a 19-bit word offset: [-1M, 1M). */
	offset = label_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* sf bit selects the 64-bit register form. */
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
  364. u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
  365. enum aarch64_insn_condition cond)
  366. {
  367. u32 insn;
  368. long offset;
  369. offset = label_imm_common(pc, addr, SZ_1M);
  370. insn = aarch64_insn_get_bcond_value();
  371. if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
  372. pr_err("%s: unknown condition encoding %d\n", __func__, cond);
  373. return AARCH64_BREAK_FAULT;
  374. }
  375. insn |= cond;
  376. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
  377. offset >> 2);
  378. }
  379. u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
  380. {
  381. return aarch64_insn_get_hint_value() | op;
  382. }
  383. u32 __kprobes aarch64_insn_gen_nop(void)
  384. {
  385. return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
  386. }
/*
 * Generate a register branch: BR, BLR, or RET through @reg.
 * Returns AARCH64_BREAK_FAULT for an unknown branch type.
 */
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* The target register lives in the Rn field for these encodings. */
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
/*
 * Generate an LDR/STR with register offset: [base, offset], accessing
 * @reg with width @size. Returns AARCH64_BREAK_FAULT on invalid input.
 */
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
  432. u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
  433. enum aarch64_insn_register base,
  434. unsigned int imm,
  435. enum aarch64_insn_size_type size,
  436. enum aarch64_insn_ldst_type type)
  437. {
  438. u32 insn;
  439. u32 shift;
  440. if (size < AARCH64_INSN_SIZE_8 || size > AARCH64_INSN_SIZE_64) {
  441. pr_err("%s: unknown size encoding %d\n", __func__, type);
  442. return AARCH64_BREAK_FAULT;
  443. }
  444. shift = aarch64_insn_ldst_size[size];
  445. if (imm & ~(BIT(12 + shift) - BIT(shift))) {
  446. pr_err("%s: invalid imm: %d\n", __func__, imm);
  447. return AARCH64_BREAK_FAULT;
  448. }
  449. imm >>= shift;
  450. switch (type) {
  451. case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
  452. insn = aarch64_insn_get_ldr_imm_value();
  453. break;
  454. case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
  455. insn = aarch64_insn_get_str_imm_value();
  456. break;
  457. default:
  458. pr_err("%s: unknown load/store encoding %d\n", __func__, type);
  459. return AARCH64_BREAK_FAULT;
  460. }
  461. insn = aarch64_insn_encode_ldst_size(size, insn);
  462. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
  463. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
  464. base);
  465. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
  466. }
/*
 * Generate an LDR (literal) at @pc loading @reg from @addr.
 * @is64bit selects Xt vs Wt via opc bit 30. Returns
 * AARCH64_BREAK_FAULT if the offset is unencodable.
 */
u32 aarch64_insn_gen_load_literal(unsigned long pc, unsigned long addr,
				  enum aarch64_insn_register reg,
				  bool is64bit)
{
	u32 insn;
	long offset;

	/* LDR (literal) takes a 19-bit word offset: [-1M, 1M). */
	offset = label_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_get_ldr_lit_value();

	if (is64bit)
		insn |= BIT(30);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
/*
 * Generate an LDP/STP with pre- or post-index addressing, transferring
 * @reg1/@reg2 at [base, #offset]. The signed @offset must be a
 * size-aligned multiple within the scaled 7-bit immediate range.
 * Returns AARCH64_BREAK_FAULT on any invalid input.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* imm7 is scaled by 4 for W-register pairs. */
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* imm7 is scaled by 8 for X-register pairs. */
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
/*
 * Generate a load/store exclusive (LDXR/LDAXR/STXR/STLXR) of width
 * @size on [base]. For stores, @state receives the exclusive-status
 * result (Rs field); for loads Rs is unused by hardware but still
 * encoded. Returns AARCH64_BREAK_FAULT on invalid input.
 */
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
	case AARCH64_INSN_LDST_LOAD_ACQ_EX:
		insn = aarch64_insn_get_load_ex_value();
		/* Bit 15 (L/o0) adds acquire semantics (LDAXR). */
		if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
			insn |= BIT(15);
		break;
	case AARCH64_INSN_LDST_STORE_EX:
	case AARCH64_INSN_LDST_STORE_REL_EX:
		insn = aarch64_insn_get_store_ex_value();
		/* Bit 15 (o0) adds release semantics (STLXR). */
		if (type == AARCH64_INSN_LDST_STORE_REL_EX)
			insn |= BIT(15);
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* Rt2 must read as all-ones (XZR) for the single-register forms. */
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}
  574. #ifdef CONFIG_ARM64_LSE_ATOMICS
/*
 * Return @insn with the LSE atomic acquire/release bits (A:R, bits
 * [23:22]) set according to @type, or AARCH64_BREAK_FAULT for an
 * unknown ordering.
 */
static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
					  u32 insn)
{
	u32 order;

	switch (type) {
	case AARCH64_INSN_MEM_ORDER_NONE:
		order = 0;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQ:
		order = 2;	/* A=1, R=0 */
		break;
	case AARCH64_INSN_MEM_ORDER_REL:
		order = 1;	/* A=0, R=1 */
		break;
	case AARCH64_INSN_MEM_ORDER_ACQREL:
		order = 3;	/* A=1, R=1 */
		break;
	default:
		pr_err("%s: unknown mem order %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(23, 22);
	insn |= order << 22;

	return insn;
}
/*
 * Generate an LSE atomic load-op (LDADD/LDCLR/LDEOR/LDSET/SWP):
 * atomically apply @value at [address], returning the old value in
 * @result, with width @size (32/64-bit only) and memory ordering
 * @order. Returns AARCH64_BREAK_FAULT on invalid input.
 */
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
				  enum aarch64_insn_register address,
				  enum aarch64_insn_register value,
				  enum aarch64_insn_size_type size,
				  enum aarch64_insn_mem_atomic_op op,
				  enum aarch64_insn_mem_order_type order)
{
	u32 insn;

	switch (op) {
	case AARCH64_INSN_MEM_ATOMIC_ADD:
		insn = aarch64_insn_get_ldadd_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_CLR:
		insn = aarch64_insn_get_ldclr_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_EOR:
		insn = aarch64_insn_get_ldeor_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SET:
		insn = aarch64_insn_get_ldset_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SWP:
		insn = aarch64_insn_get_swp_value();
		break;
	default:
		pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
		return AARCH64_BREAK_FAULT;
	}

	/* Only word and doubleword forms are supported here. */
	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_ldst_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}
/*
 * Return @insn with the CAS acquire (bit 22) and release (bit 15) bits
 * set according to @type, or AARCH64_BREAK_FAULT for an unknown
 * ordering. Note CAS places its L bit at 15, unlike the load-op
 * atomics handled by aarch64_insn_encode_ldst_order().
 */
static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
					 u32 insn)
{
	u32 order;

	switch (type) {
	case AARCH64_INSN_MEM_ORDER_NONE:
		order = 0;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQ:
		order = BIT(22);
		break;
	case AARCH64_INSN_MEM_ORDER_REL:
		order = BIT(15);
		break;
	case AARCH64_INSN_MEM_ORDER_ACQREL:
		order = BIT(15) | BIT(22);
		break;
	default:
		pr_err("%s: unknown mem order %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(BIT(15) | BIT(22));
	insn |= order;

	return insn;
}
/*
 * Generate an LSE CAS instruction: compare [address] with @result and,
 * on match, store @value; the old memory value lands in @result.
 * Width @size must be 32 or 64 bit. Returns AARCH64_BREAK_FAULT on
 * invalid input.
 */
u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
			 enum aarch64_insn_register address,
			 enum aarch64_insn_register value,
			 enum aarch64_insn_size_type size,
			 enum aarch64_insn_mem_order_type order)
{
	u32 insn;

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_get_cas_value();

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_cas_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}
  695. #endif
/*
 * Return @insn with the PRFM prfop operand assembled from @type
 * (PLD/PLI/PST), @target (L1/L2/L3) and @policy (KEEP/STRM), or
 * AARCH64_BREAK_FAULT for an unknown component. The 5-bit prfop value
 * occupies the Rt field: type<4:3>, target<2:1>, policy<0>.
 */
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}
/*
 * Generate a PRFM (immediate) instruction prefetching [base, #0] with
 * the given prefetch type, cache target and policy.
 */
u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* Zero immediate offset. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}
/*
 * Generate an ADD/SUB (immediate) instruction, optionally setting
 * flags: dst = src op imm. @imm must be a 12-bit value or a 12-bit
 * value shifted left by 12 (encoded via the LSL #12 bit); anything
 * else returns AARCH64_BREAK_FAULT.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}
  806. u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
  807. enum aarch64_insn_register src,
  808. int immr, int imms,
  809. enum aarch64_insn_variant variant,
  810. enum aarch64_insn_bitfield_type type)
  811. {
  812. u32 insn;
  813. u32 mask;
  814. switch (type) {
  815. case AARCH64_INSN_BITFIELD_MOVE:
  816. insn = aarch64_insn_get_bfm_value();
  817. break;
  818. case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
  819. insn = aarch64_insn_get_ubfm_value();
  820. break;
  821. case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
  822. insn = aarch64_insn_get_sbfm_value();
  823. break;
  824. default:
  825. pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
  826. return AARCH64_BREAK_FAULT;
  827. }
  828. switch (variant) {
  829. case AARCH64_INSN_VARIANT_32BIT:
  830. mask = GENMASK(4, 0);
  831. break;
  832. case AARCH64_INSN_VARIANT_64BIT:
  833. insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
  834. mask = GENMASK(5, 0);
  835. break;
  836. default:
  837. pr_err("%s: unknown variant encoding %d\n", __func__, variant);
  838. return AARCH64_BREAK_FAULT;
  839. }
  840. if (immr & ~mask) {
  841. pr_err("%s: invalid immr encoding %d\n", __func__, immr);
  842. return AARCH64_BREAK_FAULT;
  843. }
  844. if (imms & ~mask) {
  845. pr_err("%s: invalid imms encoding %d\n", __func__, imms);
  846. return AARCH64_BREAK_FAULT;
  847. }
  848. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  849. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  850. insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
  851. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
  852. }
  853. u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
  854. int imm, int shift,
  855. enum aarch64_insn_variant variant,
  856. enum aarch64_insn_movewide_type type)
  857. {
  858. u32 insn;
  859. switch (type) {
  860. case AARCH64_INSN_MOVEWIDE_ZERO:
  861. insn = aarch64_insn_get_movz_value();
  862. break;
  863. case AARCH64_INSN_MOVEWIDE_KEEP:
  864. insn = aarch64_insn_get_movk_value();
  865. break;
  866. case AARCH64_INSN_MOVEWIDE_INVERSE:
  867. insn = aarch64_insn_get_movn_value();
  868. break;
  869. default:
  870. pr_err("%s: unknown movewide encoding %d\n", __func__, type);
  871. return AARCH64_BREAK_FAULT;
  872. }
  873. if (imm & ~(SZ_64K - 1)) {
  874. pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
  875. return AARCH64_BREAK_FAULT;
  876. }
  877. switch (variant) {
  878. case AARCH64_INSN_VARIANT_32BIT:
  879. if (shift != 0 && shift != 16) {
  880. pr_err("%s: invalid shift encoding %d\n", __func__,
  881. shift);
  882. return AARCH64_BREAK_FAULT;
  883. }
  884. break;
  885. case AARCH64_INSN_VARIANT_64BIT:
  886. insn |= AARCH64_INSN_SF_BIT;
  887. if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
  888. pr_err("%s: invalid shift encoding %d\n", __func__,
  889. shift);
  890. return AARCH64_BREAK_FAULT;
  891. }
  892. break;
  893. default:
  894. pr_err("%s: unknown variant encoding %d\n", __func__, variant);
  895. return AARCH64_BREAK_FAULT;
  896. }
  897. insn |= (shift >> 4) << 21;
  898. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  899. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
  900. }
  901. u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
  902. enum aarch64_insn_register src,
  903. enum aarch64_insn_register reg,
  904. int shift,
  905. enum aarch64_insn_variant variant,
  906. enum aarch64_insn_adsb_type type)
  907. {
  908. u32 insn;
  909. switch (type) {
  910. case AARCH64_INSN_ADSB_ADD:
  911. insn = aarch64_insn_get_add_value();
  912. break;
  913. case AARCH64_INSN_ADSB_SUB:
  914. insn = aarch64_insn_get_sub_value();
  915. break;
  916. case AARCH64_INSN_ADSB_ADD_SETFLAGS:
  917. insn = aarch64_insn_get_adds_value();
  918. break;
  919. case AARCH64_INSN_ADSB_SUB_SETFLAGS:
  920. insn = aarch64_insn_get_subs_value();
  921. break;
  922. default:
  923. pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
  924. return AARCH64_BREAK_FAULT;
  925. }
  926. switch (variant) {
  927. case AARCH64_INSN_VARIANT_32BIT:
  928. if (shift & ~(SZ_32 - 1)) {
  929. pr_err("%s: invalid shift encoding %d\n", __func__,
  930. shift);
  931. return AARCH64_BREAK_FAULT;
  932. }
  933. break;
  934. case AARCH64_INSN_VARIANT_64BIT:
  935. insn |= AARCH64_INSN_SF_BIT;
  936. if (shift & ~(SZ_64 - 1)) {
  937. pr_err("%s: invalid shift encoding %d\n", __func__,
  938. shift);
  939. return AARCH64_BREAK_FAULT;
  940. }
  941. break;
  942. default:
  943. pr_err("%s: unknown variant encoding %d\n", __func__, variant);
  944. return AARCH64_BREAK_FAULT;
  945. }
  946. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  947. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  948. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
  949. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
  950. }
  951. u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
  952. enum aarch64_insn_register src,
  953. enum aarch64_insn_variant variant,
  954. enum aarch64_insn_data1_type type)
  955. {
  956. u32 insn;
  957. switch (type) {
  958. case AARCH64_INSN_DATA1_REVERSE_16:
  959. insn = aarch64_insn_get_rev16_value();
  960. break;
  961. case AARCH64_INSN_DATA1_REVERSE_32:
  962. insn = aarch64_insn_get_rev32_value();
  963. break;
  964. case AARCH64_INSN_DATA1_REVERSE_64:
  965. if (variant != AARCH64_INSN_VARIANT_64BIT) {
  966. pr_err("%s: invalid variant for reverse64 %d\n",
  967. __func__, variant);
  968. return AARCH64_BREAK_FAULT;
  969. }
  970. insn = aarch64_insn_get_rev64_value();
  971. break;
  972. default:
  973. pr_err("%s: unknown data1 encoding %d\n", __func__, type);
  974. return AARCH64_BREAK_FAULT;
  975. }
  976. switch (variant) {
  977. case AARCH64_INSN_VARIANT_32BIT:
  978. break;
  979. case AARCH64_INSN_VARIANT_64BIT:
  980. insn |= AARCH64_INSN_SF_BIT;
  981. break;
  982. default:
  983. pr_err("%s: unknown variant encoding %d\n", __func__, variant);
  984. return AARCH64_BREAK_FAULT;
  985. }
  986. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  987. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  988. }
  989. u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
  990. enum aarch64_insn_register src,
  991. enum aarch64_insn_register reg,
  992. enum aarch64_insn_variant variant,
  993. enum aarch64_insn_data2_type type)
  994. {
  995. u32 insn;
  996. switch (type) {
  997. case AARCH64_INSN_DATA2_UDIV:
  998. insn = aarch64_insn_get_udiv_value();
  999. break;
  1000. case AARCH64_INSN_DATA2_SDIV:
  1001. insn = aarch64_insn_get_sdiv_value();
  1002. break;
  1003. case AARCH64_INSN_DATA2_LSLV:
  1004. insn = aarch64_insn_get_lslv_value();
  1005. break;
  1006. case AARCH64_INSN_DATA2_LSRV:
  1007. insn = aarch64_insn_get_lsrv_value();
  1008. break;
  1009. case AARCH64_INSN_DATA2_ASRV:
  1010. insn = aarch64_insn_get_asrv_value();
  1011. break;
  1012. case AARCH64_INSN_DATA2_RORV:
  1013. insn = aarch64_insn_get_rorv_value();
  1014. break;
  1015. default:
  1016. pr_err("%s: unknown data2 encoding %d\n", __func__, type);
  1017. return AARCH64_BREAK_FAULT;
  1018. }
  1019. switch (variant) {
  1020. case AARCH64_INSN_VARIANT_32BIT:
  1021. break;
  1022. case AARCH64_INSN_VARIANT_64BIT:
  1023. insn |= AARCH64_INSN_SF_BIT;
  1024. break;
  1025. default:
  1026. pr_err("%s: unknown variant encoding %d\n", __func__, variant);
  1027. return AARCH64_BREAK_FAULT;
  1028. }
  1029. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  1030. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  1031. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
  1032. }
  1033. u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
  1034. enum aarch64_insn_register src,
  1035. enum aarch64_insn_register reg1,
  1036. enum aarch64_insn_register reg2,
  1037. enum aarch64_insn_variant variant,
  1038. enum aarch64_insn_data3_type type)
  1039. {
  1040. u32 insn;
  1041. switch (type) {
  1042. case AARCH64_INSN_DATA3_MADD:
  1043. insn = aarch64_insn_get_madd_value();
  1044. break;
  1045. case AARCH64_INSN_DATA3_MSUB:
  1046. insn = aarch64_insn_get_msub_value();
  1047. break;
  1048. default:
  1049. pr_err("%s: unknown data3 encoding %d\n", __func__, type);
  1050. return AARCH64_BREAK_FAULT;
  1051. }
  1052. switch (variant) {
  1053. case AARCH64_INSN_VARIANT_32BIT:
  1054. break;
  1055. case AARCH64_INSN_VARIANT_64BIT:
  1056. insn |= AARCH64_INSN_SF_BIT;
  1057. break;
  1058. default:
  1059. pr_err("%s: unknown variant encoding %d\n", __func__, variant);
  1060. return AARCH64_BREAK_FAULT;
  1061. }
  1062. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  1063. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
  1064. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
  1065. reg1);
  1066. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
  1067. reg2);
  1068. }
  1069. u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
  1070. enum aarch64_insn_register src,
  1071. enum aarch64_insn_register reg,
  1072. int shift,
  1073. enum aarch64_insn_variant variant,
  1074. enum aarch64_insn_logic_type type)
  1075. {
  1076. u32 insn;
  1077. switch (type) {
  1078. case AARCH64_INSN_LOGIC_AND:
  1079. insn = aarch64_insn_get_and_value();
  1080. break;
  1081. case AARCH64_INSN_LOGIC_BIC:
  1082. insn = aarch64_insn_get_bic_value();
  1083. break;
  1084. case AARCH64_INSN_LOGIC_ORR:
  1085. insn = aarch64_insn_get_orr_value();
  1086. break;
  1087. case AARCH64_INSN_LOGIC_ORN:
  1088. insn = aarch64_insn_get_orn_value();
  1089. break;
  1090. case AARCH64_INSN_LOGIC_EOR:
  1091. insn = aarch64_insn_get_eor_value();
  1092. break;
  1093. case AARCH64_INSN_LOGIC_EON:
  1094. insn = aarch64_insn_get_eon_value();
  1095. break;
  1096. case AARCH64_INSN_LOGIC_AND_SETFLAGS:
  1097. insn = aarch64_insn_get_ands_value();
  1098. break;
  1099. case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
  1100. insn = aarch64_insn_get_bics_value();
  1101. break;
  1102. default:
  1103. pr_err("%s: unknown logical encoding %d\n", __func__, type);
  1104. return AARCH64_BREAK_FAULT;
  1105. }
  1106. switch (variant) {
  1107. case AARCH64_INSN_VARIANT_32BIT:
  1108. if (shift & ~(SZ_32 - 1)) {
  1109. pr_err("%s: invalid shift encoding %d\n", __func__,
  1110. shift);
  1111. return AARCH64_BREAK_FAULT;
  1112. }
  1113. break;
  1114. case AARCH64_INSN_VARIANT_64BIT:
  1115. insn |= AARCH64_INSN_SF_BIT;
  1116. if (shift & ~(SZ_64 - 1)) {
  1117. pr_err("%s: invalid shift encoding %d\n", __func__,
  1118. shift);
  1119. return AARCH64_BREAK_FAULT;
  1120. }
  1121. break;
  1122. default:
  1123. pr_err("%s: unknown variant encoding %d\n", __func__, variant);
  1124. return AARCH64_BREAK_FAULT;
  1125. }
  1126. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
  1127. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
  1128. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
  1129. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
  1130. }
/*
 * MOV (register) is architecturally an alias of ORR (shifted register) where
 * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
 */
u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      enum aarch64_insn_variant variant)
{
	/* Zero shift amount: a plain register copy via the ORR data path. */
	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
						    src, 0, variant,
						    AARCH64_INSN_LOGIC_ORR);
}
  1143. u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
  1144. enum aarch64_insn_register reg,
  1145. enum aarch64_insn_adr_type type)
  1146. {
  1147. u32 insn;
  1148. s32 offset;
  1149. switch (type) {
  1150. case AARCH64_INSN_ADR_TYPE_ADR:
  1151. insn = aarch64_insn_get_adr_value();
  1152. offset = addr - pc;
  1153. break;
  1154. case AARCH64_INSN_ADR_TYPE_ADRP:
  1155. insn = aarch64_insn_get_adrp_value();
  1156. offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
  1157. break;
  1158. default:
  1159. pr_err("%s: unknown adr encoding %d\n", __func__, type);
  1160. return AARCH64_BREAK_FAULT;
  1161. }
  1162. if (offset < -SZ_1M || offset >= SZ_1M)
  1163. return AARCH64_BREAK_FAULT;
  1164. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
  1165. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
  1166. }
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		/*
		 * Shift the 26-bit field so its top bit lands on bit 31,
		 * then arithmetic-shift back: this sign-extends while
		 * leaving a net << 2 (instruction words -> bytes).
		 */
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		/* Same sign-extend-and-scale trick for the 19-bit field. */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		/* ... and for the 14-bit test-and-branch field. */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
  1191. /*
  1192. * Encode the displacement of a branch in the imm field and return the
  1193. * updated instruction.
  1194. */
  1195. u32 aarch64_set_branch_offset(u32 insn, s32 offset)
  1196. {
  1197. if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
  1198. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
  1199. offset >> 2);
  1200. if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
  1201. aarch64_insn_is_bcond(insn))
  1202. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
  1203. offset >> 2);
  1204. if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
  1205. return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
  1206. offset >> 2);
  1207. /* Unhandled instruction */
  1208. BUG();
  1209. }
/* Return the byte offset encoded in an ADRP instruction. */
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	/* The decoded immediate counts 4K pages; scale it up to bytes. */
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}
/* Re-encode an ADRP instruction with a new byte offset (page-scaled). */
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	/* Low 12 bits are dropped: the immediate counts 4K pages. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}
  1221. /*
  1222. * Extract the Op/CR data from a msr/mrs instruction.
  1223. */
  1224. u32 aarch64_insn_extract_system_reg(u32 insn)
  1225. {
  1226. return (insn & 0x1FFFE0) >> 5;
  1227. }
/*
 * Treats @insn as the first halfword of a Thumb instruction: values of
 * 0xe800 and above begin a 32-bit ("wide") encoding (see the Thumb
 * instruction set encoding rules in the Arm ARM).
 */
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}
  1232. /*
  1233. * Macros/defines for extracting register numbers from instruction.
  1234. */
  1235. u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
  1236. {
  1237. return (insn & (0xf << offset)) >> offset;
  1238. }
  1239. #define OPC2_MASK 0x7
  1240. #define OPC2_OFFSET 5
  1241. u32 aarch32_insn_mcr_extract_opc2(u32 insn)
  1242. {
  1243. return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
  1244. }
#define CRM_MASK	0xf
/* Pull the 4-bit CRm field (bits [3:0]) out of an MCR instruction. */
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
/*
 * Return true if @val is a single contiguous run of set bits.
 * The caller must guarantee val is neither 0 nor all-ones (__ffs64(0)
 * is undefined, and all-ones would be misreported).
 */
static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 sval = val >> __ffs64(val);

	/*
	 * With the trailing zeroes shifted out, a contiguous run of ones
	 * is 2^n - 1, so adding one clears every set bit.
	 */
	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}
/*
 * Encode @imm as an A64 "bitmask immediate" (the N:immr:imms fields used
 * by the logical-immediate instructions) into @insn.
 *
 * Returns the updated instruction, or AARCH64_BREAK_FAULT if @imm is not
 * representable (all-zeroes, all-ones, or not a rotated replicated
 * pattern of a contiguous run of ones).
 */
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		/* Stop once the two halves of the element differ. */
		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		/* Halves match: the element size can be halved. */
		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls64(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
  1338. u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
  1339. enum aarch64_insn_variant variant,
  1340. enum aarch64_insn_register Rn,
  1341. enum aarch64_insn_register Rd,
  1342. u64 imm)
  1343. {
  1344. u32 insn;
  1345. switch (type) {
  1346. case AARCH64_INSN_LOGIC_AND:
  1347. insn = aarch64_insn_get_and_imm_value();
  1348. break;
  1349. case AARCH64_INSN_LOGIC_ORR:
  1350. insn = aarch64_insn_get_orr_imm_value();
  1351. break;
  1352. case AARCH64_INSN_LOGIC_EOR:
  1353. insn = aarch64_insn_get_eor_imm_value();
  1354. break;
  1355. case AARCH64_INSN_LOGIC_AND_SETFLAGS:
  1356. insn = aarch64_insn_get_ands_imm_value();
  1357. break;
  1358. default:
  1359. pr_err("%s: unknown logical encoding %d\n", __func__, type);
  1360. return AARCH64_BREAK_FAULT;
  1361. }
  1362. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
  1363. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
  1364. return aarch64_encode_immediate(imm, variant, insn);
  1365. }
  1366. u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
  1367. enum aarch64_insn_register Rm,
  1368. enum aarch64_insn_register Rn,
  1369. enum aarch64_insn_register Rd,
  1370. u8 lsb)
  1371. {
  1372. u32 insn;
  1373. insn = aarch64_insn_get_extr_value();
  1374. switch (variant) {
  1375. case AARCH64_INSN_VARIANT_32BIT:
  1376. if (lsb > 31)
  1377. return AARCH64_BREAK_FAULT;
  1378. break;
  1379. case AARCH64_INSN_VARIANT_64BIT:
  1380. if (lsb > 63)
  1381. return AARCH64_BREAK_FAULT;
  1382. insn |= AARCH64_INSN_SF_BIT;
  1383. insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
  1384. break;
  1385. default:
  1386. pr_err("%s: unknown variant encoding %d\n", __func__, variant);
  1387. return AARCH64_BREAK_FAULT;
  1388. }
  1389. insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
  1390. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
  1391. insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
  1392. return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
  1393. }
  1394. u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
  1395. {
  1396. u32 opt;
  1397. u32 insn;
  1398. switch (type) {
  1399. case AARCH64_INSN_MB_SY:
  1400. opt = 0xf;
  1401. break;
  1402. case AARCH64_INSN_MB_ST:
  1403. opt = 0xe;
  1404. break;
  1405. case AARCH64_INSN_MB_LD:
  1406. opt = 0xd;
  1407. break;
  1408. case AARCH64_INSN_MB_ISH:
  1409. opt = 0xb;
  1410. break;
  1411. case AARCH64_INSN_MB_ISHST:
  1412. opt = 0xa;
  1413. break;
  1414. case AARCH64_INSN_MB_ISHLD:
  1415. opt = 0x9;
  1416. break;
  1417. case AARCH64_INSN_MB_NSH:
  1418. opt = 0x7;
  1419. break;
  1420. case AARCH64_INSN_MB_NSHST:
  1421. opt = 0x6;
  1422. break;
  1423. case AARCH64_INSN_MB_NSHLD:
  1424. opt = 0x5;
  1425. break;
  1426. default:
  1427. pr_err("%s: unknown dmb type %d\n", __func__, type);
  1428. return AARCH64_BREAK_FAULT;
  1429. }
  1430. insn = aarch64_insn_get_dmb_value();
  1431. insn &= ~GENMASK(11, 8);
  1432. insn |= (opt << 8);
  1433. return insn;
  1434. }