  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* adi_64.c: support for ADI (Application Data Integrity) feature on
  3. * sparc m7 and newer processors. This feature is also known as
  4. * SSM (Silicon Secured Memory).
  5. *
  6. * Copyright (C) 2016 Oracle and/or its affiliates. All rights reserved.
  7. * Author: Khalid Aziz ([email protected])
  8. */
  9. #include <linux/init.h>
  10. #include <linux/slab.h>
  11. #include <linux/mm_types.h>
  12. #include <asm/mdesc.h>
  13. #include <asm/adi_64.h>
  14. #include <asm/mmu_64.h>
  15. #include <asm/pgtable_64.h>
  16. /* Each page of storage for ADI tags can accommodate tags for 128
  17. * pages. When ADI enabled pages are being swapped out, it would be
  18. * prudent to allocate at least enough tag storage space to accommodate
  19. * SWAPFILE_CLUSTER number of pages. Allocate enough tag storage to
  20. * store tags for four SWAPFILE_CLUSTER pages to reduce need for
  21. * further allocations for same vma.
  22. */
  23. #define TAG_STORAGE_PAGES 8
/* Global ADI capability state, populated once at boot by
 * mdesc_adi_init() below. Exported so other kernel code and modules
 * can query whether ADI is enabled and its block size/tag width.
 */
struct adi_config adi_state;
EXPORT_SYMBOL(adi_state);
  26. /* mdesc_adi_init() : Parse machine description provided by the
  27. * hypervisor to detect ADI capabilities
  28. *
  29. * Hypervisor reports ADI capabilities of platform in "hwcap-list" property
  30. * for "cpu" node. If the platform supports ADI, "hwcap-list" property
  31. * contains the keyword "adp". If the platform supports ADI, "platform"
  32. * node will contain "adp-blksz", "adp-nbits" and "ue-on-adp" properties
  33. * to describe the ADI capabilities.
  34. */
  35. void __init mdesc_adi_init(void)
  36. {
  37. struct mdesc_handle *hp = mdesc_grab();
  38. const char *prop;
  39. u64 pn, *val;
  40. int len;
  41. if (!hp)
  42. goto adi_not_found;
  43. pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
  44. if (pn == MDESC_NODE_NULL)
  45. goto adi_not_found;
  46. prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
  47. if (!prop)
  48. goto adi_not_found;
  49. /*
  50. * Look for "adp" keyword in hwcap-list which would indicate
  51. * ADI support
  52. */
  53. adi_state.enabled = false;
  54. while (len) {
  55. int plen;
  56. if (!strcmp(prop, "adp")) {
  57. adi_state.enabled = true;
  58. break;
  59. }
  60. plen = strlen(prop) + 1;
  61. prop += plen;
  62. len -= plen;
  63. }
  64. if (!adi_state.enabled)
  65. goto adi_not_found;
  66. /* Find the ADI properties in "platform" node. If all ADI
  67. * properties are not found, ADI support is incomplete and
  68. * do not enable ADI in the kernel.
  69. */
  70. pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
  71. if (pn == MDESC_NODE_NULL)
  72. goto adi_not_found;
  73. val = (u64 *) mdesc_get_property(hp, pn, "adp-blksz", &len);
  74. if (!val)
  75. goto adi_not_found;
  76. adi_state.caps.blksz = *val;
  77. val = (u64 *) mdesc_get_property(hp, pn, "adp-nbits", &len);
  78. if (!val)
  79. goto adi_not_found;
  80. adi_state.caps.nbits = *val;
  81. val = (u64 *) mdesc_get_property(hp, pn, "ue-on-adp", &len);
  82. if (!val)
  83. goto adi_not_found;
  84. adi_state.caps.ue_on_adi = *val;
  85. /* Some of the code to support swapping ADI tags is written
  86. * assumption that two ADI tags can fit inside one byte. If
  87. * this assumption is broken by a future architecture change,
  88. * that code will have to be revisited. If that were to happen,
  89. * disable ADI support so we do not get unpredictable results
  90. * with programs trying to use ADI and their pages getting
  91. * swapped out
  92. */
  93. if (adi_state.caps.nbits > 4) {
  94. pr_warn("WARNING: ADI tag size >4 on this platform. Disabling AADI support\n");
  95. adi_state.enabled = false;
  96. }
  97. mdesc_release(hp);
  98. return;
  99. adi_not_found:
  100. adi_state.enabled = false;
  101. adi_state.caps.blksz = 0;
  102. adi_state.caps.nbits = 0;
  103. if (hp)
  104. mdesc_release(hp);
  105. }
  106. tag_storage_desc_t *find_tag_store(struct mm_struct *mm,
  107. struct vm_area_struct *vma,
  108. unsigned long addr)
  109. {
  110. tag_storage_desc_t *tag_desc = NULL;
  111. unsigned long i, max_desc, flags;
  112. /* Check if this vma already has tag storage descriptor
  113. * allocated for it.
  114. */
  115. max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
  116. if (mm->context.tag_store) {
  117. tag_desc = mm->context.tag_store;
  118. spin_lock_irqsave(&mm->context.tag_lock, flags);
  119. for (i = 0; i < max_desc; i++) {
  120. if ((addr >= tag_desc->start) &&
  121. ((addr + PAGE_SIZE - 1) <= tag_desc->end))
  122. break;
  123. tag_desc++;
  124. }
  125. spin_unlock_irqrestore(&mm->context.tag_lock, flags);
  126. /* If no matching entries were found, this must be a
  127. * freshly allocated page
  128. */
  129. if (i >= max_desc)
  130. tag_desc = NULL;
  131. }
  132. return tag_desc;
  133. }
/* Find or create a tag storage descriptor covering the page at @addr.
 * If an in-use descriptor already covers the page, its refcount is
 * bumped and it is returned. Otherwise a free slot is claimed, tag
 * storage pages are allocated for it, and its address range is sized
 * to fit the largest hole between neighboring descriptors. Returns
 * NULL if the descriptor table or tag storage cannot be allocated, or
 * if all descriptor slots are in use. Runs entirely under
 * mm->context.tag_lock.
 */
tag_storage_desc_t *alloc_tag_store(struct mm_struct *mm,
				    struct vm_area_struct *vma,
				    unsigned long addr)
{
	unsigned char *tags;
	unsigned long i, size, max_desc, flags;
	tag_storage_desc_t *tag_desc, *open_desc;
	unsigned long end_addr, hole_start, hole_end;

	/* One page worth of descriptors; the table itself is allocated
	 * lazily on first use in the else-branch below.
	 */
	max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
	open_desc = NULL;
	hole_start = 0;
	hole_end = ULONG_MAX;
	end_addr = addr + PAGE_SIZE - 1;

	/* Check if this vma already has tag storage descriptor
	 * allocated for it.
	 */
	spin_lock_irqsave(&mm->context.tag_lock, flags);
	if (mm->context.tag_store) {
		tag_desc = mm->context.tag_store;

		/* Look for a matching entry for this address. While doing
		 * that, look for the first open slot as well and find
		 * the hole in already allocated range where this request
		 * will fit in.
		 */
		for (i = 0; i < max_desc; i++) {
			if (tag_desc->tag_users == 0) {
				if (open_desc == NULL)
					open_desc = tag_desc;
			} else {
				if ((addr >= tag_desc->start) &&
				    (tag_desc->end >= (addr + PAGE_SIZE - 1))) {
					/* Existing coverage: take a ref */
					tag_desc->tag_users++;
					goto out;
				}
			}
			/* Narrow [hole_start, hole_end] to the tightest
			 * gap around addr formed by in-use descriptors.
			 */
			if ((tag_desc->start > end_addr) &&
			    (tag_desc->start < hole_end))
				hole_end = tag_desc->start;
			if ((tag_desc->end < addr) &&
			    (tag_desc->end > hole_start))
				hole_start = tag_desc->end;
			tag_desc++;
		}
	} else {
		/* First use for this mm: allocate the descriptor table.
		 * GFP_NOWAIT since we hold a spinlock with interrupts
		 * disabled.
		 */
		size = sizeof(tag_storage_desc_t)*max_desc;
		mm->context.tag_store = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
		if (mm->context.tag_store == NULL) {
			tag_desc = NULL;
			goto out;
		}
		tag_desc = mm->context.tag_store;
		for (i = 0; i < max_desc; i++, tag_desc++)
			tag_desc->tag_users = 0;
		open_desc = mm->context.tag_store;
		i = 0;
	}

	/* Check if we ran out of tag storage descriptors */
	if (open_desc == NULL) {
		tag_desc = NULL;
		goto out;
	}

	/* Mark this tag descriptor slot in use and then initialize it */
	tag_desc = open_desc;
	tag_desc->tag_users = 1;

	/* Tag storage has not been allocated for this vma and space
	 * is available in tag storage descriptor. Since this page is
	 * being swapped out, there is high probability subsequent pages
	 * in the VMA will be swapped out as well. Allocate pages to
	 * store tags for as many pages in this vma as possible but not
	 * more than TAG_STORAGE_PAGES. Each byte in tag space holds
	 * two ADI tags since each ADI tag is 4 bits. Each ADI tag
	 * covers adi_blksize() worth of addresses. Check if the hole is
	 * big enough to accommodate full address range for using
	 * TAG_STORAGE_PAGES number of tag pages.
	 */
	size = TAG_STORAGE_PAGES * PAGE_SIZE;
	end_addr = addr + (size*2*adi_blksize()) - 1;

	/* Check for overflow. If overflow occurs, allocate only one page */
	if (end_addr < addr) {
		size = PAGE_SIZE;
		end_addr = addr + (size*2*adi_blksize()) - 1;
		/* If overflow happens with the minimum tag storage
		 * allocation as well, adjust ending address for this
		 * tag storage.
		 */
		if (end_addr < addr)
			end_addr = ULONG_MAX;
	}

	if (hole_end < end_addr) {
		/* Available hole is too small on the upper end of
		 * address. Can we expand the range towards the lower
		 * address and maximize use of this slot?
		 */
		unsigned long tmp_addr;

		end_addr = hole_end - 1;
		tmp_addr = end_addr - (size*2*adi_blksize()) + 1;
		/* Check for underflow. If underflow occurs, allocate
		 * only one page for storing ADI tags
		 */
		if (tmp_addr > addr) {
			size = PAGE_SIZE;
			/* NOTE(review): the overflow path above recomputes
			 * the single-page range with "+ 1"; this uses
			 * "- 1" — confirm the asymmetry is intentional.
			 */
			tmp_addr = end_addr - (size*2*adi_blksize()) - 1;
			/* If underflow happens with the minimum tag storage
			 * allocation as well, adjust starting address for
			 * this tag storage.
			 */
			if (tmp_addr > addr)
				tmp_addr = 0;
		}
		if (tmp_addr < hole_start) {
			/* Available hole is restricted on lower address
			 * end as well
			 */
			tmp_addr = hole_start + 1;
		}
		addr = tmp_addr;
		/* Recompute tag storage size to cover [addr, end_addr],
		 * two tags per byte, rounded up to whole pages.
		 */
		size = (end_addr + 1 - addr)/(2*adi_blksize());
		size = (size + (PAGE_SIZE-adi_blksize()))/PAGE_SIZE;
		size = size * PAGE_SIZE;
	}

	tags = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
	if (tags == NULL) {
		/* Release the slot we claimed above on failure */
		tag_desc->tag_users = 0;
		tag_desc = NULL;
		goto out;
	}
	tag_desc->start = addr;
	tag_desc->tags = tags;
	tag_desc->end = end_addr;

out:
	spin_unlock_irqrestore(&mm->context.tag_lock, flags);
	return tag_desc;
}
  267. void del_tag_store(tag_storage_desc_t *tag_desc, struct mm_struct *mm)
  268. {
  269. unsigned long flags;
  270. unsigned char *tags = NULL;
  271. spin_lock_irqsave(&mm->context.tag_lock, flags);
  272. tag_desc->tag_users--;
  273. if (tag_desc->tag_users == 0) {
  274. tag_desc->start = tag_desc->end = 0;
  275. /* Do not free up the tag storage space allocated
  276. * by the first descriptor. This is persistent
  277. * emergency tag storage space for the task.
  278. */
  279. if (tag_desc != mm->context.tag_store) {
  280. tags = tag_desc->tags;
  281. tag_desc->tags = NULL;
  282. }
  283. }
  284. spin_unlock_irqrestore(&mm->context.tag_lock, flags);
  285. kfree(tags);
  286. }
  287. #define tag_start(addr, tag_desc) \
  288. ((tag_desc)->tags + ((addr - (tag_desc)->start)/(2*adi_blksize())))
/* Retrieve any saved ADI tags for the page being swapped back in and
 * restore these tags to the newly allocated physical page.
 */
void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pte_t pte)
{
	unsigned char *tag;
	tag_storage_desc_t *tag_desc;
	unsigned long paddr, tmp, version1, version2;

	/* Check if the swapped out page has an ADI version
	 * saved. If yes, restore version tag to the newly
	 * allocated page.
	 */
	tag_desc = find_tag_store(mm, vma, addr);
	if (tag_desc == NULL)
		return;

	tag = tag_start(addr, tag_desc);
	paddr = pte_val(pte) & _PAGE_PADDR_4V;
	/* Each stored byte packs two 4-bit tags. The extra
	 * "tmp += adi_blksize()" inside the body plus the loop
	 * increment advance two ADI blocks per byte consumed.
	 */
	for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
		version1 = (*tag) >> 4;		/* high nibble: first block */
		version2 = (*tag) & 0x0f;	/* low nibble: second block */
		*tag++ = 0;			/* clear entry as it is consumed */
		asm volatile("stxa %0, [%1] %2\n\t"
			     :
			     : "r" (version1), "r" (tmp),
			       "i" (ASI_MCD_REAL));
		tmp += adi_blksize();
		asm volatile("stxa %0, [%1] %2\n\t"
			     :
			     : "r" (version2), "r" (tmp),
			       "i" (ASI_MCD_REAL));
	}
	/* Order all tag stores before proceeding */
	asm volatile("membar #Sync\n\t");

	/* Check and mark this tag space for release later if
	 * the swapped in page was the last user of tag space
	 */
	del_tag_store(tag_desc, mm);
}
/* A page is about to be swapped out. Save any ADI tags associated with
 * this physical page so they can be restored later when the page is swapped
 * back in.
 *
 * Returns 0 on success, -1 if tag storage could not be allocated.
 */
int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long addr, pte_t oldpte)
{
	unsigned char *tag;
	tag_storage_desc_t *tag_desc;
	unsigned long version1, version2, paddr, tmp;

	tag_desc = alloc_tag_store(mm, vma, addr);
	if (tag_desc == NULL)
		return -1;

	tag = tag_start(addr, tag_desc);
	paddr = pte_val(oldpte) & _PAGE_PADDR_4V;
	/* Pack two 4-bit tags per stored byte: tmp advances one extra
	 * adi_blksize() inside the body, so each iteration reads the
	 * tags of two consecutive ADI blocks.
	 */
	for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
		asm volatile("ldxa [%1] %2, %0\n\t"
			     : "=r" (version1)
			     : "r" (tmp), "i" (ASI_MCD_REAL));
		tmp += adi_blksize();
		asm volatile("ldxa [%1] %2, %0\n\t"
			     : "=r" (version2)
			     : "r" (tmp), "i" (ASI_MCD_REAL));
		*tag = (version1 << 4) | version2;
		tag++;
	}

	return 0;
}