/* cvmx-fpa.h */
  1. /***********************license start***************
  2. * Author: Cavium Networks
  3. *
  4. * Contact: [email protected]
  5. * This file is part of the OCTEON SDK
  6. *
  7. * Copyright (c) 2003-2008 Cavium Networks
  8. *
  9. * This file is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License, Version 2, as
  11. * published by the Free Software Foundation.
  12. *
  13. * This file is distributed in the hope that it will be useful, but
  14. * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16. * NONINFRINGEMENT. See the GNU General Public License for more
  17. * details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this file; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  22. * or visit http://www.gnu.org/licenses/.
  23. *
  24. * This file may also be available under a different license from Cavium.
  25. * Contact Cavium Networks for more information
  26. ***********************license end**************************************/
  27. /**
  28. * @file
  29. *
  30. * Interface to the hardware Free Pool Allocator.
  31. *
  32. *
  33. */
  34. #ifndef __CVMX_FPA_H__
  35. #define __CVMX_FPA_H__
  36. #include <linux/delay.h>
  37. #include <asm/octeon/cvmx-address.h>
  38. #include <asm/octeon/cvmx-fpa-defs.h>
  39. #define CVMX_FPA_NUM_POOLS 8
  40. #define CVMX_FPA_MIN_BLOCK_SIZE 128
  41. #define CVMX_FPA_ALIGNMENT 128
  42. /**
  43. * Structure describing the data format used for stores to the FPA.
  44. */
typedef union {
	/* Raw 64-bit command word sent over the IOBDMA path. */
	uint64_t u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/*
		 * The (64-bit word) location in scratchpad to write
		 * to (if len != 0).
		 */
		uint64_t scraddr:8;
		/* The number of words in the response (0 => no response). */
		uint64_t len:8;
		/* The ID of the device on the non-coherent bus. */
		uint64_t did:8;
		/*
		 * The address that will appear in the first tick on
		 * the NCB bus.
		 */
		uint64_t addr:40;
#else
		/* Same fields, reversed for little-endian bitfield layout. */
		uint64_t addr:40;
		uint64_t did:8;
		uint64_t len:8;
		uint64_t scraddr:8;
#endif
	} s;
} cvmx_fpa_iobdma_data_t;
  71. /**
  72. * Structure describing the current state of a FPA pool.
  73. */
typedef struct {
	/* Name the pool was created under (not owned by this struct). */
	const char *name;
	/* Size of each block, in bytes. */
	uint64_t size;
	/* The base memory address of the whole block region. */
	void *base;
	/* The number of elements in the pool at creation. */
	uint64_t starting_element_count;
} cvmx_fpa_pool_info_t;
  84. /**
  85. * Current state of all the pools. Use access functions
  86. * instead of using it directly.
  87. */
  88. extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
  89. /* CSR typedefs have been moved to cvmx-csr-*.h */
  90. /**
  91. * Return the name of the pool
  92. *
  93. * @pool: Pool to get the name of
  94. * Returns The name
  95. */
  96. static inline const char *cvmx_fpa_get_name(uint64_t pool)
  97. {
  98. return cvmx_fpa_pool_info[pool].name;
  99. }
  100. /**
  101. * Return the base of the pool
  102. *
  103. * @pool: Pool to get the base of
  104. * Returns The base
  105. */
  106. static inline void *cvmx_fpa_get_base(uint64_t pool)
  107. {
  108. return cvmx_fpa_pool_info[pool].base;
  109. }
  110. /**
  111. * Check if a pointer belongs to an FPA pool. Return non-zero
  112. * if the supplied pointer is inside the memory controlled by
  113. * an FPA pool.
  114. *
  115. * @pool: Pool to check
  116. * @ptr: Pointer to check
  117. * Returns Non-zero if pointer is in the pool. Zero if not
  118. */
  119. static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
  120. {
  121. return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
  122. ((char *)ptr <
  123. ((char *)(cvmx_fpa_pool_info[pool].base)) +
  124. cvmx_fpa_pool_info[pool].size *
  125. cvmx_fpa_pool_info[pool].starting_element_count));
  126. }
  127. /**
  128. * Enable the FPA for use. Must be performed after any CSR
  129. * configuration but before any other FPA functions.
  130. */
  131. static inline void cvmx_fpa_enable(void)
  132. {
  133. union cvmx_fpa_ctl_status status;
  134. status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
  135. if (status.s.enb) {
  136. cvmx_dprintf
  137. ("Warning: Enabling FPA when FPA already enabled.\n");
  138. }
  139. /*
  140. * Do runtime check as we allow pass1 compiled code to run on
  141. * pass2 chips.
  142. */
  143. if (cvmx_octeon_is_pass1()) {
  144. union cvmx_fpa_fpfx_marks marks;
  145. int i;
  146. for (i = 1; i < 8; i++) {
  147. marks.u64 =
  148. cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
  149. marks.s.fpf_wr = 0xe0;
  150. cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
  151. marks.u64);
  152. }
  153. /* Enforce a 10 cycle delay between config and enable */
  154. __delay(10);
  155. }
  156. /* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
  157. status.u64 = 0;
  158. status.s.enb = 1;
  159. cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
  160. }
  161. /**
  162. * Get a new block from the FPA
  163. *
  164. * @pool: Pool to get the block from
  165. * Returns Pointer to the block or NULL on failure
  166. */
  167. static inline void *cvmx_fpa_alloc(uint64_t pool)
  168. {
  169. uint64_t address =
  170. cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
  171. if (address)
  172. return cvmx_phys_to_ptr(address);
  173. else
  174. return NULL;
  175. }
  176. /**
  177. * Asynchronously get a new block from the FPA
  178. *
  179. * @scr_addr: Local scratch address to put response in. This is a byte address,
  180. * but must be 8 byte aligned.
  181. * @pool: Pool to get the block from
  182. */
  183. static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
  184. {
  185. cvmx_fpa_iobdma_data_t data;
  186. /*
  187. * Hardware only uses 64 bit aligned locations, so convert
  188. * from byte address to 64-bit index
  189. */
  190. data.s.scraddr = scr_addr >> 3;
  191. data.s.len = 1;
  192. data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
  193. data.s.addr = 0;
  194. cvmx_send_single(data.u64);
  195. }
  196. /**
  197. * Free a block allocated with a FPA pool. Does NOT provide memory
  198. * ordering in cases where the memory block was modified by the core.
  199. *
  200. * @ptr: Block to free
  201. * @pool: Pool to put it in
  202. * @num_cache_lines:
  203. * Cache lines to invalidate
  204. */
  205. static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
  206. uint64_t num_cache_lines)
  207. {
  208. cvmx_addr_t newptr;
  209. newptr.u64 = cvmx_ptr_to_phys(ptr);
  210. newptr.sfilldidspace.didspace =
  211. CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
  212. /* Prevent GCC from reordering around free */
  213. barrier();
  214. /* value written is number of cache lines not written back */
  215. cvmx_write_io(newptr.u64, num_cache_lines);
  216. }
  217. /**
  218. * Free a block allocated with a FPA pool. Provides required memory
  219. * ordering in cases where memory block was modified by core.
  220. *
  221. * @ptr: Block to free
  222. * @pool: Pool to put it in
  223. * @num_cache_lines:
  224. * Cache lines to invalidate
  225. */
  226. static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
  227. uint64_t num_cache_lines)
  228. {
  229. cvmx_addr_t newptr;
  230. newptr.u64 = cvmx_ptr_to_phys(ptr);
  231. newptr.sfilldidspace.didspace =
  232. CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
  233. /*
  234. * Make sure that any previous writes to memory go out before
  235. * we free this buffer. This also serves as a barrier to
  236. * prevent GCC from reordering operations to after the
  237. * free.
  238. */
  239. CVMX_SYNCWS;
  240. /* value written is number of cache lines not written back */
  241. cvmx_write_io(newptr.u64, num_cache_lines);
  242. }
  243. /**
  244. * Shutdown a Memory pool and validate that it had all of
  245. * the buffers originally placed in it. This should only be
  246. * called by one processor after all hardware has finished
  247. * using the pool.
  248. *
  249. * @pool: Pool to shutdown
  250. * Returns Zero on success
  251. * - Positive is count of missing buffers
  252. * - Negative is too many buffers or corrupted pointers
  253. */
  254. extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
  255. /**
  256. * Get the size of blocks controlled by the pool
  257. * This is resolved to a constant at compile time.
  258. *
  259. * @pool: Pool to access
  260. * Returns Size of the block in bytes
  261. */
  262. uint64_t cvmx_fpa_get_block_size(uint64_t pool);
#endif /* __CVMX_FPA_H__ */