pble.c

// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "pble.h"

static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);

/**
 * irdma_destroy_pble_prm - destroy prm during module unload
 * @pble_rsrc: pble resources
 */
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_chunk *chunk;
	struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;

	while (!list_empty(&pinfo->clist)) {
		chunk = (struct irdma_chunk *) pinfo->clist.next;
		list_del(&chunk->list);
		if (chunk->type == PBLE_SD_PAGED)
			irdma_pble_free_paged_mem(chunk);
		bitmap_free(chunk->bitmapbuf);
		kfree(chunk->chunkmem.va);
	}
}

/**
 * irdma_hmc_init_pble - Initialize pble resources during module load
 * @dev: irdma_sc_dev struct
 * @pble_rsrc: pble resources
 */
int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
			struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_hmc_info *hmc_info;
	u32 fpm_idx = 0;
	int status = 0;

	hmc_info = dev->hmc_info;
	pble_rsrc->dev = dev;
	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
	/* Start pbles on a 4k boundary */
	if (pble_rsrc->fpm_base_addr & 0xfff)
		fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
	pble_rsrc->unallocated_pble =
		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
	pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;

	mutex_init(&pble_rsrc->pble_mutex_lock);
	spin_lock_init(&pble_rsrc->pinfo.prm_lock);
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	if (add_pble_prm(pble_rsrc)) {
		irdma_destroy_pble_prm(pble_rsrc);
		status = -ENOMEM;
	}

	return status;
}
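
/*
 * Worked example of the alignment above (illustrative numbers, not taken
 * from hardware): each pble is an 8-byte entry, so if fpm_base_addr ends
 * in 0x008 the region is 4088 bytes short of the next 4k boundary and
 * fpm_idx = 4088 >> 3 = 511 entries are skipped; next_fpm_addr =
 * fpm_base_addr + (511 << 3) then lands exactly on the boundary.
 */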

/**
 * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
			  struct sd_pd_idx *idx)
{
	idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
	idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}
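
/*
 * Reading the indexes: pd_idx counts IRDMA_HMC_PAGED_BP_SIZE pages from
 * the start of the FPM space, sd_idx counts whole segment descriptors of
 * IRDMA_HMC_DIRECT_BP_SIZE bytes, and rel_pd_idx is the page's position
 * within its own sd (0 .. IRDMA_HMC_PD_CNT_IN_SD - 1).
 */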

/**
 * add_sd_direct - add sd direct for pble
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	int ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct irdma_chunk *chunk = info->chunk;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
						    info->idx.sd_idx,
						    IRDMA_SD_TYPE_DIRECT,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			return ret_code;

		chunk->type = PBLE_SD_CONTIGOUS;
	}

	offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
	chunk->vaddr = sd_entry->u.bp.addr.va + offset;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
		  chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);

	return 0;
}
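
/*
 * Note: with a direct sd the whole sd is one physically contiguous
 * backing page, so the chunk's virtual address is simply that page's
 * base plus the byte offset of the first unused 4k page
 * (rel_pd_idx << HMC_PAGED_BP_SHIFT).
 */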

/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
	u64 idx;

	idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;

	return (u32)idx;
}
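
/*
 * The shift by 3 converts a byte offset into an entry index: each pble is
 * an 8-byte (u64) entry, so idx = (addr - fpm_base_addr) / 8.
 */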

/**
 * add_bp_pages - add backing pages for sd
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static int add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
			struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	u8 *addr;
	struct irdma_dma_mem mem;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_chunk *chunk = info->chunk;
	int status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

	if (irdma_pble_get_paged_mem(chunk, info->pages))
		return -ENOMEM;

	status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
					  IRDMA_SD_TYPE_PAGED,
					  IRDMA_HMC_DIRECT_BP_SIZE);
	if (status)
		goto error;

	addr = chunk->vaddr;
	for (i = 0; i < info->pages; i++) {
		mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
		mem.size = 4096;
		mem.va = addr;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = irdma_add_pd_table_entry(dev, hmc_info,
							  pd_idx++, &mem);
			if (status)
				goto error;

			addr += 4096;
		}
	}

	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;

error:
	irdma_pble_free_paged_mem(chunk);

	return status;
}

/**
 * irdma_get_type - select the sd entry type to use for an sd
 * @dev: irdma_sc_dev struct
 * @idx: index of sd
 * @pages: pages in the sd
 */
static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
					       struct sd_pd_idx *idx, u32 pages)
{
	enum irdma_sd_entry_type sd_entry_type;

	sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
			IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;

	return sd_entry_type;
}
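
/*
 * In other words, a direct (contiguous) sd is chosen only when the chunk
 * starts exactly on an sd boundary (rel_pd_idx == 0) and covers all
 * IRDMA_HMC_PD_CNT_IN_SD pages of the sd; any partial fit uses a paged sd.
 */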

/**
 * add_pble_prm - add a sd entry for pble resource
 * @pble_rsrc: pble resource management
 */
static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_hmc_info *hmc_info;
	struct irdma_chunk *chunk;
	struct irdma_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	int ret_code = 0;
	enum irdma_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	struct irdma_virt_mem chunkmem;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return -ENOMEM;

	if (pble_rsrc->next_fpm_addr & 0xfff)
		return -EINVAL;

	chunkmem.size = sizeof(*chunk);
	chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
	if (!chunkmem.va)
		return -ENOMEM;

	chunk = chunkmem.va;
	chunk->chunkmem = chunkmem;
	hmc_info = dev->hmc_info;
	chunk->dev = dev;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
	pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
				    IRDMA_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid)
		sd_entry_type = irdma_get_type(dev, idx, pages);
	else
		sd_entry_type = sd_entry->entry_type;

	ibdev_dbg(to_ibdev(dev),
		  "PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
		  pages, pble_rsrc->unallocated_pble,
		  pble_rsrc->next_fpm_addr);
	ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
	if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
		ret_code = add_sd_direct(pble_rsrc, &info);

	if (ret_code)
		sd_entry_type = IRDMA_SD_TYPE_PAGED;
	else
		pble_rsrc->stats_direct_sds++;

	if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}

	ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
	if (ret_code)
		goto error;

	pble_rsrc->next_fpm_addr += chunk->size;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
		  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
	sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
			     sd_entry->u.pd_table.pd_page_addr.pa :
			     sd_entry->u.bp.addr.pa;

	if (!sd_entry->valid) {
		ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
					    idx->sd_idx, sd_entry->entry_type, true);
		if (ret_code)
			goto error;
	}

	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	sd_entry->valid = true;
	return 0;

error:
	bitmap_free(chunk->bitmapbuf);
	kfree(chunk->chunkmem.va);

	return ret_code;
}
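
/*
 * Flow summary: add_pble_prm() grows the pble pool by up to one sd's worth
 * of pages, preferring a single contiguous (direct) backing page and
 * falling back to individually mapped 4k pages (paged), then registers the
 * new chunk with the prm and, for a brand-new sd, programs it into the HMC
 * via irdma_hmc_sd_one().
 */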

/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
		      struct irdma_pble_alloc *palloc)
{
	u32 i;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf = lvl2->leaf;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			irdma_prm_return_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo);
		else
			break;
	}

	if (root->addr)
		irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);

	kfree(lvl2->leafmem.va);
	lvl2->leaf = NULL;
}

/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static int get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf;
	int ret_code;
	u64 fpm_addr;

	/* number of full 512-entry (4k) leaves */
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	lvl2->leafmem.size = (sizeof(*leaf) * total);
	lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
	if (!lvl2->leafmem.va)
		return -ENOMEM;

	lvl2->leaf = lvl2->leafmem.va;
	leaf = lvl2->leaf;
	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
				       total << 3, &root->addr, &fpm_addr);
	if (ret_code) {
		kfree(lvl2->leafmem.va);
		lvl2->leaf = NULL;
		return -ENOMEM;
	}

	root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	root->cnt = total;
	addr = root->addr;
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ?
				lflast : PBLE_PER_PAGE;
		ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo, pblcnt << 3,
					       &leaf->addr, &fpm_addr);
		if (ret_code)
			goto error;

		leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);
		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}

	palloc->level = PBLE_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;

error:
	free_lvl2(pble_rsrc, palloc);

	return -ENOMEM;
}
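
/*
 * Level 2 layout: the root is a single run of 'total' 8-byte entries,
 * each holding the pble index of one leaf; every leaf holds up to
 * PBLE_PER_PAGE (512, hence the >> 9 above) entries, and only the last
 * leaf may be partial (lflast).
 */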

/**
 * get_lvl1_pble - get level 1 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	int ret_code;
	u64 fpm_addr;
	struct irdma_pble_info *lvl1 = &palloc->level1;

	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
				       palloc->total_cnt << 3, &lvl1->addr,
				       &fpm_addr);
	if (ret_code)
		return -ENOMEM;

	palloc->level = PBLE_LEVEL_1;
	lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;

	return 0;
}

/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @level1_only: flag for a level 1 PBLE
 */
static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			      struct irdma_pble_alloc *palloc, bool level1_only)
{
	int status = 0;

	status = get_lvl1_pble(pble_rsrc, palloc);
	if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
		return status;

	status = get_lvl2_pble(pble_rsrc, palloc);

	return status;
}
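
/*
 * Fallback rule: a contiguous level 1 run is always tried first; level 2
 * is attempted only when that fails, the caller did not insist on level 1,
 * and the request exceeds one page of pbles (a request that fits in a
 * single leaf would gain nothing from the extra indirection).
 */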

/**
 * irdma_get_pble - allocate pbles from the prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: #of pbles requested
 * @level1_only: true if only pble level 1 to acquire
 */
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		   struct irdma_pble_alloc *palloc, u32 pble_cnt,
		   bool level1_only)
{
	int status = 0;
	int max_sds = 0;
	int i;

	palloc->total_cnt = pble_cnt;
	palloc->level = PBLE_LEVEL_0;
	mutex_lock(&pble_rsrc->pble_mutex_lock);

	/* check first to see if we can get pbles without acquiring
	 * additional sds
	 */
	status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
	if (!status)
		goto exit;

	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_prm(pble_rsrc);
		if (status)
			break;

		status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
		/* if level1_only, only go through it once */
		if (!status || level1_only)
			break;
	}

exit:
	if (!status) {
		pble_rsrc->allocdpbles += pble_cnt;
		pble_rsrc->stats_alloc_ok++;
	} else {
		pble_rsrc->stats_alloc_fail++;
	}
	mutex_unlock(&pble_rsrc->pble_mutex_lock);

	return status;
}
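
/*
 * The retry bound above appears to follow from the sd geometry: assuming
 * IRDMA_HMC_PD_CNT_IN_SD == 512, a full sd supplies 512 pages of 512
 * pbles each, i.e. 2^18 pbles, so (total_cnt >> 18) + 1 iterations of
 * add_pble_prm() cover any request that can succeed at all.
 *
 * Usage sketch (hypothetical caller; pble_rsrc is assumed to have been
 * set up by irdma_hmc_init_pble(), and pg_cnt is the caller's page count):
 *
 *	struct irdma_pble_alloc palloc = {};
 *
 *	if (!irdma_get_pble(pble_rsrc, &palloc, pg_cnt, false)) {
 *		... program palloc.level1.idx or palloc.level2 as needed ...
 *		irdma_free_pble(pble_rsrc, &palloc);
 *	}
 */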

/**
 * irdma_free_pble - put pbles back into prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		     struct irdma_pble_alloc *palloc)
{
	pble_rsrc->freedpbles += palloc->total_cnt;

	if (palloc->level == PBLE_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		irdma_prm_return_pbles(&pble_rsrc->pinfo,
				       &palloc->level1.chunkinfo);
	pble_rsrc->stats_alloc_freed++;
}