  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  4. *
  5. * vineetg: June 2010
  6. * -__clear_user( ) called multiple times during elf load was byte loop
  7. * converted to do as much word clear as possible.
  8. *
  9. * vineetg: Dec 2009
  10. * -Hand crafted constant propagation for "constant" copy sizes
  11. * -stock kernel shrunk by 33K at -O3
  12. *
  13. * vineetg: Sept 2009
  14. * -Added option to (UN)inline copy_(to|from)_user to reduce code sz
  15. * -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
  16. * -Enabled when doing -Os
  17. *
  18. * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
  19. */
  20. #ifndef _ASM_ARC_UACCESS_H
  21. #define _ASM_ARC_UACCESS_H
  22. #include <linux/string.h> /* for generic string functions */
  23. /*********** Single byte/hword/word copies ******************/
/*
 * Copy one value of @sz bytes (1/2/4/8) from user pointer @u into the
 * kernel lvalue *@k, dispatching to the matching per-size asm helper.
 * Expands to 0 on success, -EFAULT on a faulting user access (set by
 * the fixup code inside the helpers below).  A @sz outside {1,2,4,8}
 * falls through the switch and yields 0 with *@k untouched.
 */
#define __get_user_fn(sz, u, k)						\
({									\
	long __ret = 0;	/* success by default */			\
	switch (sz) {							\
	case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break;	\
	case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break;	\
	case 4: __arc_get_user_one(*(k), u, "ld", __ret); break;	\
	case 8: __arc_get_user_one_64(*(k), u, __ret); break;		\
	}								\
	__ret;								\
})
/*
 * Returns 0 on success, -EFAULT if not.
 * @ret already contains 0 - given that errors will be less likely
 * (hence +r asm constraint below).
 * In case of error, fixup code will make it -EFAULT
 *
 * Mechanics: label "1:" is the user-space load (@op is one of
 * "ldb"/"ldw"/"ld"); the __ex_table entry (1b,3b) routes a fault at 1
 * to the .fixup stub at "3:", which sets @ret = -EFAULT, zeroes @dst,
 * and resumes at "2:" right after the load.
 */
#define __arc_get_user_one(dst, src, op, ret)	\
	__asm__ __volatile__(			\
	"1: "op" %1,[%2]\n"			\
	"2: ;nop\n"				\
	" .section .fixup, \"ax\"\n"		\
	" .align 4\n"				\
	"3: # return -EFAULT\n"			\
	" mov %0, %3\n"				\
	" # zero out dst ptr\n"			\
	" mov %1, 0\n"				\
	" j 2b\n"				\
	" .previous\n"				\
	" .section __ex_table, \"a\"\n"		\
	" .align 4\n"				\
	" .word 1b,3b\n"			\
	" .previous\n"				\
						\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))
/*
 * 64-bit variant of __arc_get_user_one: two 32-bit loads at labels
 * "1:" and "4:" fill the low (%1) and high (%R1) halves of the 64-bit
 * @dst register pair.  Both load PCs get an __ex_table entry pointing
 * at the same fixup, which sets @ret = -EFAULT and zeroes both halves.
 */
#define __arc_get_user_one_64(dst, src, ret)	\
	__asm__ __volatile__(			\
	"1: ld %1,[%2]\n"			\
	"4: ld %R1,[%2, 4]\n"			\
	"2: ;nop\n"				\
	" .section .fixup, \"ax\"\n"		\
	" .align 4\n"				\
	"3: # return -EFAULT\n"			\
	" mov %0, %3\n"				\
	" # zero out dst ptr\n"			\
	" mov %1, 0\n"				\
	" mov %R1, 0\n"				\
	" j 2b\n"				\
	" .previous\n"				\
	" .section __ex_table, \"a\"\n"		\
	" .align 4\n"				\
	" .word 1b,3b\n"			\
	" .word 4b,3b\n"			\
	" .previous\n"				\
						\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))
/*
 * Store one value of @sz bytes (1/2/4/8) from the kernel lvalue *@k to
 * user pointer @u, dispatching to the matching per-size asm helper.
 * Expands to 0 on success, -EFAULT on a faulting user access.  A @sz
 * outside {1,2,4,8} falls through the switch and yields 0 (no store).
 */
#define __put_user_fn(sz, u, k)						\
({									\
	long __ret = 0;	/* success by default */			\
	switch (sz) {							\
	case 1: __arc_put_user_one(*(k), u, "stb", __ret); break;	\
	case 2: __arc_put_user_one(*(k), u, "stw", __ret); break;	\
	case 4: __arc_put_user_one(*(k), u, "st", __ret); break;	\
	case 8: __arc_put_user_one_64(*(k), u, __ret); break;		\
	}								\
	__ret;								\
})
/*
 * Store @src to user pointer @dst with @op ("stb"/"stw"/"st").
 * A fault at the store (label "1:") is routed via __ex_table to the
 * fixup at "3:", which only sets @ret = -EFAULT (nothing to zero on
 * the store path) and resumes at "2:".
 */
#define __arc_put_user_one(src, dst, op, ret)	\
	__asm__ __volatile__(			\
	"1: "op" %1,[%2]\n"			\
	"2: ;nop\n"				\
	" .section .fixup, \"ax\"\n"		\
	" .align 4\n"				\
	"3: mov %0, %3\n"			\
	" j 2b\n"				\
	" .previous\n"				\
	" .section __ex_table, \"a\"\n"		\
	" .align 4\n"				\
	" .word 1b,3b\n"			\
	" .previous\n"				\
						\
	: "+r" (ret)				\
	: "r" (src), "r" (dst), "ir" (-EFAULT))
/*
 * 64-bit variant of __arc_put_user_one: two 32-bit stores (labels "1:"
 * and "4:") write the low (%1) and high (%R1) halves of the @src
 * register pair.  Both store PCs share one fixup that sets
 * @ret = -EFAULT and resumes at "2:".
 */
#define __arc_put_user_one_64(src, dst, ret)	\
	__asm__ __volatile__(			\
	"1: st %1,[%2]\n"			\
	"4: st %R1,[%2, 4]\n"			\
	"2: ;nop\n"				\
	" .section .fixup, \"ax\"\n"		\
	" .align 4\n"				\
	"3: mov %0, %3\n"			\
	" j 2b\n"				\
	" .previous\n"				\
	" .section __ex_table, \"a\"\n"		\
	" .align 4\n"				\
	" .word 1b,3b\n"			\
	" .word 4b,3b\n"			\
	" .previous\n"				\
						\
	: "+r" (ret)				\
	: "r" (src), "r" (dst), "ir" (-EFAULT))
/*
 * raw_copy_from_user - copy @n bytes from user @from to kernel @to.
 *
 * Returns the number of bytes NOT copied (0 on full success).  Each
 * chunk's "sub" decrements the residual count only after its accesses
 * succeed; a faulting load jumps (via .fixup) past the remainder of
 * the copy, so the residual count is what gets returned.
 *
 * Three strategies:
 *  - unaligned src or dst: plain byte loop;
 *  - compile-time-constant @n: hand-laddered 16x/8/4/2/1 chunks so the
 *    compiler drops the unused rungs (see file header: "constant
 *    propagation for constant copy sizes");
 *  - runtime @n: one asm ladder: 16-byte loop plus straggler chunks.
 */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	long res = 0;
	char val;
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (n == 0)
		return 0;

	/* unaligned */
	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {

		unsigned char tmp;

		/*
		 * Byte loop using the ARC zero-overhead loop (lp_count).
		 * Only the user load "1:" can fault; its fixup "3:"
		 * jumps to the loop exit "2:", leaving the not-yet-copied
		 * count in %0 (n), which is returned.
		 */
		__asm__ __volatile__ (
		" mov.f lp_count, %0 \n"
		" lpnz 2f \n"
		"1: ldb.ab %1, [%3, 1] \n"
		" stb.ab %1, [%2, 1] \n"
		" sub %0,%0,1 \n"
		"2: ;nop \n"
		" .section .fixup, \"ax\" \n"
		" .align 4 \n"
		"3: j 2b \n"
		" .previous \n"
		" .section __ex_table, \"a\" \n"
		" .align 4 \n"
		" .word 1b, 3b \n"
		" .previous \n"
		: "+r" (n),
		/*
		 * Note as an '&' earlyclobber operand to make sure the
		 * temporary register inside the loop is not the same as
		 * FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		:
		: "lp_count", "memory");

		return n;
	}

	/*
	 * Hand-crafted constant propagation to reduce code sz of the
	 * laddered copy 16x,8,4,2,1
	 */
	if (__builtin_constant_p(orig_n)) {
		/* res starts at the full count; each rung subtracts its share */
		res = orig_n;

		/* n/16 iterations of 4 word loads + 4 word stores */
		if (orig_n / 16) {
			orig_n = orig_n % 16;
			__asm__ __volatile__(
			" lsr lp_count, %7,4 \n"
			" lp 3f \n"
			"1: ld.ab %3, [%2, 4] \n"
			"11: ld.ab %4, [%2, 4] \n"
			"12: ld.ab %5, [%2, 4] \n"
			"13: ld.ab %6, [%2, 4] \n"
			" st.ab %3, [%1, 4] \n"
			" st.ab %4, [%1, 4] \n"
			" st.ab %5, [%1, 4] \n"
			" st.ab %6, [%1, 4] \n"
			" sub %0,%0,16 \n"
			"3: ;nop \n"
			" .section .fixup, \"ax\" \n"
			" .align 4 \n"
			"4: j 3b \n"
			" .previous \n"
			" .section __ex_table, \"a\" \n"
			" .align 4 \n"
			" .word 1b, 4b \n"
			" .word 11b,4b \n"
			" .word 12b,4b \n"
			" .word 13b,4b \n"
			" .previous \n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "ir"(n)
			: "lp_count", "memory");
		}
		/* one 8-byte chunk (two words) */
		if (orig_n / 8) {
			orig_n = orig_n % 8;
			__asm__ __volatile__(
			"14: ld.ab %3, [%2,4] \n"
			"15: ld.ab %4, [%2,4] \n"
			" st.ab %3, [%1,4] \n"
			" st.ab %4, [%1,4] \n"
			" sub %0,%0,8 \n"
			"31: ;nop \n"
			" .section .fixup, \"ax\" \n"
			" .align 4 \n"
			"4: j 31b \n"
			" .previous \n"
			" .section __ex_table, \"a\" \n"
			" .align 4 \n"
			" .word 14b,4b \n"
			" .word 15b,4b \n"
			" .previous \n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			:
			: "memory");
		}
		/* one word */
		if (orig_n / 4) {
			orig_n = orig_n % 4;
			__asm__ __volatile__(
			"16: ld.ab %3, [%2,4] \n"
			" st.ab %3, [%1,4] \n"
			" sub %0,%0,4 \n"
			"32: ;nop \n"
			" .section .fixup, \"ax\" \n"
			" .align 4 \n"
			"4: j 32b \n"
			" .previous \n"
			" .section __ex_table, \"a\" \n"
			" .align 4 \n"
			" .word 16b,4b \n"
			" .previous \n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		/* one halfword */
		if (orig_n / 2) {
			orig_n = orig_n % 2;
			__asm__ __volatile__(
			"17: ldw.ab %3, [%2,2] \n"
			" stw.ab %3, [%1,2] \n"
			" sub %0,%0,2 \n"
			"33: ;nop \n"
			" .section .fixup, \"ax\" \n"
			" .align 4 \n"
			"4: j 33b \n"
			" .previous \n"
			" .section __ex_table, \"a\" \n"
			" .align 4 \n"
			" .word 17b,4b \n"
			" .previous \n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		/*
		 * final single byte.
		 * NOTE(review): the .ab post-increment here is 2, not 1
		 * (unlike the byte rung in raw_copy_to_user).  Harmless
		 * since this is the last access and the pointers are not
		 * used afterwards, but looks like a typo - confirm.
		 */
		if (orig_n & 1) {
			__asm__ __volatile__(
			"18: ldb.ab %3, [%2,2] \n"
			" stb.ab %3, [%1,2] \n"
			" sub %0,%0,1 \n"
			"34: ; nop \n"
			" .section .fixup, \"ax\" \n"
			" .align 4 \n"
			"4: j 34b \n"
			" .previous \n"
			" .section __ex_table, \"a\" \n"
			" .align 4 \n"
			" .word 18b,4b \n"
			" .previous \n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
	} else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
		/*
		 * Single asm block: zero-overhead loop over 16-byte
		 * chunks, then bbit0 tests on bits 3/2/1/0 of the count
		 * handle the 8/4/2/1-byte stragglers.  Every user load is
		 * labelled and routed to fixup "4:", which jumps to the
		 * common exit "34:", returning the residual count %0.
		 * (val is listed as output %4 but is not referenced in
		 * the asm template.)
		 */
		__asm__ __volatile__(
		" mov %0,%3 \n"
		" lsr.f lp_count, %3,4 \n" /* 16x bytes */
		" lpnz 3f \n"
		"1: ld.ab %5, [%2, 4] \n"
		"11: ld.ab %6, [%2, 4] \n"
		"12: ld.ab %7, [%2, 4] \n"
		"13: ld.ab %8, [%2, 4] \n"
		" st.ab %5, [%1, 4] \n"
		" st.ab %6, [%1, 4] \n"
		" st.ab %7, [%1, 4] \n"
		" st.ab %8, [%1, 4] \n"
		" sub %0,%0,16 \n"
		"3: and.f %3,%3,0xf \n" /* stragglers */
		" bz 34f \n"
		" bbit0 %3,3,31f \n" /* 8 bytes left */
		"14: ld.ab %5, [%2,4] \n"
		"15: ld.ab %6, [%2,4] \n"
		" st.ab %5, [%1,4] \n"
		" st.ab %6, [%1,4] \n"
		" sub.f %0,%0,8 \n"
		"31: bbit0 %3,2,32f \n" /* 4 bytes left */
		"16: ld.ab %5, [%2,4] \n"
		" st.ab %5, [%1,4] \n"
		" sub.f %0,%0,4 \n"
		"32: bbit0 %3,1,33f \n" /* 2 bytes left */
		"17: ldw.ab %5, [%2,2] \n"
		" stw.ab %5, [%1,2] \n"
		" sub.f %0,%0,2 \n"
		"33: bbit0 %3,0,34f \n"
		"18: ldb.ab %5, [%2,1] \n" /* 1 byte left */
		" stb.ab %5, [%1,1] \n"
		" sub.f %0,%0,1 \n"
		"34: ;nop \n"
		" .section .fixup, \"ax\" \n"
		" .align 4 \n"
		"4: j 34b \n"
		" .previous \n"
		" .section __ex_table, \"a\" \n"
		" .align 4 \n"
		" .word 1b, 4b \n"
		" .word 11b,4b \n"
		" .word 12b,4b \n"
		" .word 13b,4b \n"
		" .word 14b,4b \n"
		" .word 15b,4b \n"
		" .word 16b,4b \n"
		" .word 17b,4b \n"
		" .word 18b,4b \n"
		" .previous \n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		:
		: "lp_count", "memory");
	}

	return res;
}
/*
 * raw_copy_to_user - copy @n bytes from kernel @from to user @to.
 *
 * Returns the number of bytes NOT copied (0 on full success).
 * Mirror image of raw_copy_from_user: same three strategies
 * (unaligned byte loop / constant-size ladder / runtime ladder), but
 * here the exception-table labels sit on the user-space STOREs rather
 * than the loads, since those are the accesses that can fault.
 */
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	long res = 0;
	char val;
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (n == 0)
		return 0;

	/* unaligned */
	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {

		unsigned char tmp;

		/*
		 * Byte loop; a fault at the user store "1:" exits via
		 * fixup "4:" to "3:", returning the residual count.
		 */
		__asm__ __volatile__(
		" mov.f lp_count, %0 \n"
		" lpnz 3f \n"
		" ldb.ab %1, [%3, 1] \n"
		"1: stb.ab %1, [%2, 1] \n"
		" sub %0, %0, 1 \n"
		"3: ;nop \n"
		" .section .fixup, \"ax\" \n"
		" .align 4 \n"
		"4: j 3b \n"
		" .previous \n"
		" .section __ex_table, \"a\" \n"
		" .align 4 \n"
		" .word 1b, 4b \n"
		" .previous \n"
		: "+r" (n),
		/* Note as an '&' earlyclobber operand to make sure the
		 * temporary register inside the loop is not the same as
		 * FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		:
		: "lp_count", "memory");

		return n;
	}

	/*
	 * Hand-crafted constant propagation: laddered 16x,8,4,2,1 so
	 * unused rungs compile away for constant @n.
	 */
	if (__builtin_constant_p(orig_n)) {
		/* res starts at the full count; each rung subtracts its share */
		res = orig_n;

		/* n/16 iterations of 4 word loads + 4 (faultable) word stores */
		if (orig_n / 16) {
			orig_n = orig_n % 16;
			__asm__ __volatile__(
			" lsr lp_count, %7,4 \n"
			" lp 3f \n"
			" ld.ab %3, [%2, 4] \n"
			" ld.ab %4, [%2, 4] \n"
			" ld.ab %5, [%2, 4] \n"
			" ld.ab %6, [%2, 4] \n"
			"1: st.ab %3, [%1, 4] \n"
			"11: st.ab %4, [%1, 4] \n"
			"12: st.ab %5, [%1, 4] \n"
			"13: st.ab %6, [%1, 4] \n"
			" sub %0, %0, 16 \n"
			"3:;nop \n"
			" .section .fixup, \"ax\" \n"
			" .align 4 \n"
			"4: j 3b \n"
			" .previous \n"
			" .section __ex_table, \"a\" \n"
			" .align 4 \n"
			" .word 1b, 4b \n"
			" .word 11b,4b \n"
			" .word 12b,4b \n"
			" .word 13b,4b \n"
			" .previous \n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "ir"(n)
			: "lp_count", "memory");
		}
		/* one 8-byte chunk (two words) */
		if (orig_n / 8) {
			orig_n = orig_n % 8;
			__asm__ __volatile__(
			" ld.ab %3, [%2,4] \n"
			" ld.ab %4, [%2,4] \n"
			"14: st.ab %3, [%1,4] \n"
			"15: st.ab %4, [%1,4] \n"
			" sub %0, %0, 8 \n"
			"31:;nop \n"
			" .section .fixup, \"ax\" \n"
			" .align 4 \n"
			"4: j 31b \n"
			" .previous \n"
			" .section __ex_table, \"a\" \n"
			" .align 4 \n"
			" .word 14b,4b \n"
			" .word 15b,4b \n"
			" .previous \n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			:
			: "memory");
		}
		/* one word */
		if (orig_n / 4) {
			orig_n = orig_n % 4;
			__asm__ __volatile__(
			" ld.ab %3, [%2,4] \n"
			"16: st.ab %3, [%1,4] \n"
			" sub %0, %0, 4 \n"
			"32:;nop \n"
			" .section .fixup, \"ax\" \n"
			" .align 4 \n"
			"4: j 32b \n"
			" .previous \n"
			" .section __ex_table, \"a\" \n"
			" .align 4 \n"
			" .word 16b,4b \n"
			" .previous \n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		/* one halfword */
		if (orig_n / 2) {
			orig_n = orig_n % 2;
			__asm__ __volatile__(
			" ldw.ab %3, [%2,2] \n"
			"17: stw.ab %3, [%1,2] \n"
			" sub %0, %0, 2 \n"
			"33:;nop \n"
			" .section .fixup, \"ax\" \n"
			" .align 4 \n"
			"4: j 33b \n"
			" .previous \n"
			" .section __ex_table, \"a\" \n"
			" .align 4 \n"
			" .word 17b,4b \n"
			" .previous \n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		/* final single byte */
		if (orig_n & 1) {
			__asm__ __volatile__(
			" ldb.ab %3, [%2,1] \n"
			"18: stb.ab %3, [%1,1] \n"
			" sub %0, %0, 1 \n"
			"34: ;nop \n"
			" .section .fixup, \"ax\" \n"
			" .align 4 \n"
			"4: j 34b \n"
			" .previous \n"
			" .section __ex_table, \"a\" \n"
			" .align 4 \n"
			" .word 18b,4b \n"
			" .previous \n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
	} else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
		/*
		 * Single asm ladder: zero-overhead loop over 16-byte
		 * chunks, then bbit0 on bits 3/2/1/0 of the count for the
		 * 8/4/2/1-byte stragglers.  All user stores are labelled
		 * and routed to fixup "4:" -> exit "34:", so %0 (res)
		 * holds the residual byte count.  (val is listed as
		 * output %4 but is not referenced in the template.)
		 */
		__asm__ __volatile__(
		" mov %0,%3 \n"
		" lsr.f lp_count, %3,4 \n" /* 16x bytes */
		" lpnz 3f \n"
		" ld.ab %5, [%2, 4] \n"
		" ld.ab %6, [%2, 4] \n"
		" ld.ab %7, [%2, 4] \n"
		" ld.ab %8, [%2, 4] \n"
		"1: st.ab %5, [%1, 4] \n"
		"11: st.ab %6, [%1, 4] \n"
		"12: st.ab %7, [%1, 4] \n"
		"13: st.ab %8, [%1, 4] \n"
		" sub %0, %0, 16 \n"
		"3: and.f %3,%3,0xf \n" /* stragglers */
		" bz 34f \n"
		" bbit0 %3,3,31f \n" /* 8 bytes left */
		" ld.ab %5, [%2,4] \n"
		" ld.ab %6, [%2,4] \n"
		"14: st.ab %5, [%1,4] \n"
		"15: st.ab %6, [%1,4] \n"
		" sub.f %0, %0, 8 \n"
		"31: bbit0 %3,2,32f \n" /* 4 bytes left */
		" ld.ab %5, [%2,4] \n"
		"16: st.ab %5, [%1,4] \n"
		" sub.f %0, %0, 4 \n"
		"32: bbit0 %3,1,33f \n" /* 2 bytes left */
		" ldw.ab %5, [%2,2] \n"
		"17: stw.ab %5, [%1,2] \n"
		" sub.f %0, %0, 2 \n"
		"33: bbit0 %3,0,34f \n"
		" ldb.ab %5, [%2,1] \n" /* 1 byte left */
		"18: stb.ab %5, [%1,1] \n"
		" sub.f %0, %0, 1 \n"
		"34: ;nop \n"
		" .section .fixup, \"ax\" \n"
		" .align 4 \n"
		"4: j 34b \n"
		" .previous \n"
		" .section __ex_table, \"a\" \n"
		" .align 4 \n"
		" .word 1b, 4b \n"
		" .word 11b,4b \n"
		" .word 12b,4b \n"
		" .word 13b,4b \n"
		" .word 14b,4b \n"
		" .word 15b,4b \n"
		" .word 16b,4b \n"
		" .word 17b,4b \n"
		" .word 18b,4b \n"
		" .previous \n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		:
		: "lp_count", "memory");
	}

	return res;
}
/*
 * __arc_clear_user - zero @n bytes of user memory at @to.
 *
 * Returns the number of bytes NOT cleared (0 on success).  %2 is the
 * constant 0 being stored (the "i"(0) input operand).
 *
 * Layout: head stores align the pointer to 4 - a byte at "75:" if
 * bit 0 of the address is set, then a halfword at "76:" if bit 1 is
 * set; the word loop at "77:" runs (res >> 2) times via lp_count; the
 * tail halfword "78:" and byte "79:" cover the remaining low bits of
 * the count.  Any faulting store is routed through the .fixup "3:",
 * which jumps to the exit "5:" with the residual count in res.
 * (The numeric label 3 appears twice - in the body and in .fixup; the
 * "3b" references in __ex_table come after the .fixup definition and
 * therefore bind to the fixup stub, while the earlier "lpnz 3f" binds
 * forward to the in-body "3:".)
 */
static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
{
	long res = n;
	unsigned char *d_char = to;

	__asm__ __volatile__(
	" bbit0 %0, 0, 1f \n"
	"75: stb.ab %2, [%0,1] \n"
	" sub %1, %1, 1 \n"
	"1: bbit0 %0, 1, 2f \n"
	"76: stw.ab %2, [%0,2] \n"
	" sub %1, %1, 2 \n"
	"2: asr.f lp_count, %1, 2 \n"
	" lpnz 3f \n"
	"77: st.ab %2, [%0,4] \n"
	" sub %1, %1, 4 \n"
	"3: bbit0 %1, 1, 4f \n"
	"78: stw.ab %2, [%0,2] \n"
	" sub %1, %1, 2 \n"
	"4: bbit0 %1, 0, 5f \n"
	"79: stb.ab %2, [%0,1] \n"
	" sub %1, %1, 1 \n"
	"5: \n"
	" .section .fixup, \"ax\" \n"
	" .align 4 \n"
	"3: j 5b \n"
	" .previous \n"
	" .section __ex_table, \"a\" \n"
	" .align 4 \n"
	" .word 75b, 3b \n"
	" .word 76b, 3b \n"
	" .word 77b, 3b \n"
	" .word 78b, 3b \n"
	" .word 79b, 3b \n"
	" .previous \n"
	: "+r"(d_char), "+r"(res)
	: "i"(0)
	: "lp_count", "memory");

	return res;
}
/*
 * Build-time choice between inlined and out-of-line user-access
 * helpers (see file header: UNinlining shrinks the kernel by ~200K
 * under -Os).
 */
#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE

/* Tell <asm-generic/uaccess.h> to use the inline raw_copy_*() above */
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

#define __clear_user(d, n)		__arc_clear_user(d, n)
#else
/* -Os: go through an out-of-line wrapper defined elsewhere */
extern unsigned long arc_clear_user_noinline(void __user *to,
		unsigned long n);
#define __clear_user(d, n)		arc_clear_user_noinline(d, n)
#endif
  594. #include <asm-generic/uaccess.h>
  595. #endif