// SPDX-License-Identifier: GPL-2.0-only
/*
 * asynchronous raid6 recovery self test
 * Copyright (c) 2009, Intel Corporation.
 *
 * based on drivers/md/raid6test/test.c:
 * 	Copyright 2002-2007 H. Peter Anvin
 */
#include <linux/async_tx.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/module.h>

#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
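
/*
 * Enabled through CONFIG_ASYNC_RAID6_TEST; all results are reported
 * via pr_info() with a "raid6test: " prefix.
 */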
#define NDISKS 64 /* Including P and Q */

/*
 * data[] provides the NDISKS array members plus three extra pages: two
 * scratch pages that stand in for the "failed" blocks (recovi/recovj)
 * and a spare page consumed by async_syndrome_val().
 */
static struct page *dataptrs[NDISKS];
static unsigned int dataoffs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
static struct page *data[NDISKS+3];
static struct page *spare;
static struct page *recovi;
static struct page *recovj;
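
/* completion callback for the async_tx submissions below */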
static void callback(void *param)
{
	struct completion *cmp = param;

	complete(cmp);
}
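
/* fill the data pages with random bytes and reset the pointer/offset tables */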
static void makedata(int disks)
{
	int i;

	for (i = 0; i < disks; i++) {
		get_random_bytes(page_address(data[i]), PAGE_SIZE);
		dataptrs[i] = data[i];
		dataoffs[i] = 0;
	}
}
static char disk_type(int d, int disks)
{
	if (d == disks - 2)
		return 'P';
	else if (d == disks - 1)
		return 'Q';
	else
		return 'D';
}
/* Recover two failed blocks. */
static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
		struct page **ptrs, unsigned int *offs)
{
	struct async_submit_ctl submit;
	struct completion cmp;
	struct dma_async_tx_descriptor *tx = NULL;
	enum sum_check_flags result = ~0;

	if (faila > failb)
		swap(faila, failb);

	if (failb == disks-1) {
		if (faila == disks-2) {
			/* P+Q failure.  Just rebuild the syndrome. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_gen_syndrome(ptrs, offs,
					disks, bytes, &submit);
		} else {
			struct page *blocks[NDISKS];
			struct page *dest;
			int count = 0;
			int i;

			BUG_ON(disks > NDISKS);

			/* data+Q failure.  Reconstruct data from P,
			 * then rebuild syndrome
			 */
			for (i = disks; i-- ; ) {
				if (i == faila || i == failb)
					continue;
				blocks[count++] = ptrs[i];
			}
			dest = ptrs[faila];
			init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, addr_conv);
			tx = async_xor(dest, blocks, 0, count, bytes, &submit);

			init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
			tx = async_gen_syndrome(ptrs, offs,
					disks, bytes, &submit);
		}
	} else {
		if (failb == disks-2) {
			/* data+P failure. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_raid6_datap_recov(disks, bytes,
					faila, ptrs, offs, &submit);
		} else {
			/* data+data failure. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_raid6_2data_recov(disks, bytes,
					faila, failb, ptrs, offs, &submit);
		}
	}

	/* re-validate the syndrome against the recovered data */
	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
	tx = async_syndrome_val(ptrs, offs,
			disks, bytes, &result, spare, 0, &submit);
	async_tx_issue_pending(tx);

	if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0)
		pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",
		   __func__, faila, failb, disks);

	if (result != 0)
		pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n",
		   __func__, faila, failb, result);
}
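
/*
 * Simulate the loss of disks i and j: point the descriptor table at
 * scratch pages, run dual recovery, then compare the recovered pages
 * against the original data.
 */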
static int test_disks(int i, int j, int disks)
{
	int erra, errb;

	memset(page_address(recovi), 0xf0, PAGE_SIZE);
	memset(page_address(recovj), 0xba, PAGE_SIZE);

	dataptrs[i] = recovi;
	dataptrs[j] = recovj;

	raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs, dataoffs);

	erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
	errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);

	pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n",
	   __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks),
	   (!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB");

	dataptrs[i] = data[i];
	dataptrs[j] = data[j];

	return erra || errb;
}
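
/* run the all-pairs failure test for a given array size */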
static int test(int disks, int *tests)
{
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct completion cmp;
	int err = 0;
	int i, j;

	recovi = data[disks];
	recovj = data[disks+1];
	spare  = data[disks+2];

	makedata(disks);

	/* Nuke syndromes */
	memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);
	memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);

	/* Generate assumed good syndrome */
	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
	tx = async_gen_syndrome(dataptrs, dataoffs, disks, PAGE_SIZE, &submit);
	async_tx_issue_pending(tx);

	if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) {
		pr("error: initial gen_syndrome(%d) timed out\n", disks);
		return 1;
	}

	pr("testing the %d-disk case...\n", disks);
	for (i = 0; i < disks-1; i++)
		for (j = i+1; j < disks; j++) {
			(*tests)++;
			err += test_disks(i, j, disks);
		}

	return err;
}
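
/* allocate the backing pages, then exercise the array sizes of interest */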
static int __init raid6_test(void)
{
	int err = 0;
	int tests = 0;
	int i;

	for (i = 0; i < NDISKS+3; i++) {
		data[i] = alloc_page(GFP_KERNEL);
		if (!data[i]) {
			while (i--)
				put_page(data[i]);
			return -ENOMEM;
		}
	}

	/* the 4-disk and 5-disk cases are special for the recovery code */
	if (NDISKS > 4)
		err += test(4, &tests);
	if (NDISKS > 5)
		err += test(5, &tests);

	/* the 11 and 12 disk cases are special for ioatdma (p-disabled
	 * q-continuation without extended descriptor)
	 */
	if (NDISKS > 12) {
		err += test(11, &tests);
		err += test(12, &tests);
	}

	/* the 24 disk case is special for ioatdma as it is the boundary point
	 * at which it needs to switch from 8-source ops to 16-source
	 * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
	 */
	if (NDISKS > 24)
		err += test(24, &tests);

	err += test(NDISKS, &tests);

	pr("\n");
	pr("complete (%d tests, %d failure%s)\n",
	   tests, err, err == 1 ? "" : "s");

	for (i = 0; i < NDISKS+3; i++)
		put_page(data[i]);

	return 0;
}
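
/* all pages are released at the end of raid6_test(); nothing to do on unload */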
static void __exit raid6_test_exit(void)
{
}

/*
 * when compiled-in, wait for drivers to load first (assumes dma drivers
 * are also compiled-in)
 */
late_initcall(raid6_test);
module_exit(raid6_test_exit);
MODULE_AUTHOR("Dan Williams <[email protected]>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests");
MODULE_LICENSE("GPL");
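
/*
 * Illustrative usage, assuming the module is built as raid6test.ko.
 * The output below is derived from the pr() format strings above, not
 * captured from a real run; the test count follows from summing the
 * disk pairs tested for each array size (4, 5, 11, 12, 24, and 64 disks):
 *
 *   # modprobe raid6test
 *   raid6test: testing the 4-disk case...
 *   raid6test: test_disks(0, 1): faila=  0(D) failb=  1(D) OK
 *   ...
 *   raid6test: complete (2429 tests, 0 failures)
 */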