/* cvmx-cmd-queue.h */
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: [email protected]
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/
/*
 * Support functions for managing command queues used for
 * various hardware blocks.
 *
 * The common command queue infrastructure abstracts out the
 * software necessary for adding to Octeon's chained queue
 * structures. These structures are used for commands to the
 * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
 * hardware unit takes commands and CSRs of different types,
 * they all use basic linked command buffers to store the
 * pending request. In general, users of the CVMX API don't
 * call cvmx-cmd-queue functions directly. Instead the hardware
 * unit specific wrapper should be used. The wrappers perform
 * unit specific validation and CSR writes to submit the
 * commands.
 *
 * Even though most software will never directly interact with
 * cvmx-cmd-queue, knowledge of its internal workings can help
 * in diagnosing performance problems and help with debugging.
 *
 * Command queue pointers are stored in a global named block
 * called "cvmx_cmd_queues". Except for the PKO queues, each
 * hardware queue is stored in its own cache line to reduce SMP
 * contention on spin locks. The PKO queues are stored such that
 * every 16th queue is next to each other in memory. This scheme
 * allows for queues being in separate cache lines when there
 * are a low number of queues per port. With 16 queues per port,
 * the first queue for each port is in the same cache area. The
 * second queues for each port are in another area, etc. This
 * allows software to implement very efficient lockless PKO with
 * 16 queues per port using a minimum of cache lines per core.
 * All queues for a given core will be isolated in the same
 * cache area.
 *
 * In addition to the memory pointer layout, cvmx-cmd-queue
 * provides an optimized fair ll/sc locking mechanism for the
 * queues. The lock uses a "ticket / now serving" model to
 * maintain fair order on contended locks. In addition, it uses
 * predicted locking time to limit cache contention. When a core
 * knows it must wait in line for a lock, it spins on the
 * internal cycle counter to completely eliminate any causes of
 * bus traffic.
 */
  72. #ifndef __CVMX_CMD_QUEUE_H__
  73. #define __CVMX_CMD_QUEUE_H__
  74. #include <linux/prefetch.h>
  75. #include <asm/compiler.h>
  76. #include <asm/octeon/cvmx-fpa.h>
/**
 * By default we disable the max depth support. Most programs
 * don't use it and it slows down the command queue processing
 * significantly.
 *
 * Define this to a non-zero value (before including this header)
 * to make cvmx_cmd_queue_write*() honor the max_depth given to
 * cvmx_cmd_queue_initialize().
 */
#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
#endif
  85. /**
  86. * Enumeration representing all hardware blocks that use command
  87. * queues. Each hardware block has up to 65536 sub identifiers for
  88. * multiple command queues. Not all chips support all hardware
  89. * units.
  90. */
  91. typedef enum {
  92. CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
  93. #define CVMX_CMD_QUEUE_PKO(queue) \
  94. ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
  95. CVMX_CMD_QUEUE_ZIP = 0x10000,
  96. CVMX_CMD_QUEUE_DFA = 0x20000,
  97. CVMX_CMD_QUEUE_RAID = 0x30000,
  98. CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
  99. #define CVMX_CMD_QUEUE_DMA(queue) \
  100. ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
  101. CVMX_CMD_QUEUE_END = 0x50000,
  102. } cvmx_cmd_queue_id_t;
  103. /**
  104. * Command write operations can fail if the command queue needs
  105. * a new buffer and the associated FPA pool is empty. It can also
  106. * fail if the number of queued command words reaches the maximum
  107. * set at initialization.
  108. */
  109. typedef enum {
  110. CVMX_CMD_QUEUE_SUCCESS = 0,
  111. CVMX_CMD_QUEUE_NO_MEMORY = -1,
  112. CVMX_CMD_QUEUE_FULL = -2,
  113. CVMX_CMD_QUEUE_INVALID_PARAM = -3,
  114. CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
  115. } cvmx_cmd_queue_result_t;
/*
 * Per-queue software state. This is a packed bitfield layout shared
 * between all applications on the chip (see
 * __cvmx_cmd_queue_all_state_t), so the field order and widths must
 * not be changed.
 */
typedef struct {
	/* You have lock when this is your ticket */
	uint8_t now_serving;
	uint64_t unused1:24;
	/* Maximum outstanding command words; checked only when
	   non-zero and CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH is set */
	uint32_t max_depth;
	/* FPA pool buffers come from */
	uint64_t fpa_pool:3;
	/* Top of command buffer pointer shifted 7 (i.e. the physical
	   address of the current buffer divided by 128) */
	uint64_t base_ptr_div128:29;
	uint64_t unused2:6;
	/* FPA buffer size in 64bit words minus 1 */
	uint64_t pool_size_m1:13;
	/* Number of commands already used in buffer */
	uint64_t index:13;
} __cvmx_cmd_queue_state_t;
/**
 * This structure contains the global state of all command queues.
 * It is stored in a bootmem named block and shared by all
 * applications running on Octeon. Tickets are stored in a different
 * cache line than queue information to reduce the contention on the
 * ll/sc used to get a ticket. If this is not the case, the update
 * of queue state causes the ll/sc to fail quite often.
 */
typedef struct {
	/* One ticket counter per queue; indexed by
	   __cvmx_cmd_queue_get_index() */
	uint64_t ticket[(CVMX_CMD_QUEUE_END >> 16) * 256];
	/* Queue state, same indexing as above */
	__cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];
} __cvmx_cmd_queue_all_state_t;
/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @queue_id:  Hardware command queue to initialize
 * @max_depth: Maximum outstanding commands that can be queued
 *             (honored only when CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
 *             is enabled)
 * @fpa_pool:  FPA pool the command buffers should come from
 * @pool_size: Size of each buffer in the FPA pool (bytes)
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
						  int max_depth, int fpa_pool,
						  int pool_size);
/**
 * Shutdown a queue and free its command buffers back to the FPA.
 * The hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @queue_id: Queue to shutdown
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @queue_id: Hardware command queue to query
 *
 * Returns Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
/**
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routines access to the low level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @queue_id: Command queue to query
 *
 * Returns Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
  189. /**
  190. * Get the index into the state arrays for the supplied queue id.
  191. *
  192. * @queue_id: Queue ID to get an index for
  193. *
  194. * Returns Index into the state arrays
  195. */
  196. static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
  197. {
  198. /*
  199. * Warning: This code currently only works with devices that
  200. * have 256 queues or less. Devices with more than 16 queues
  201. * are laid out in memory to allow cores quick access to
  202. * every 16th queue. This reduces cache thrashing when you are
  203. * running 16 queues per port to support lockless operation.
  204. */
  205. int unit = queue_id >> 16;
  206. int q = (queue_id >> 4) & 0xf;
  207. int core = queue_id & 0xf;
  208. return unit * 256 + core * 16 + q;
  209. }
/**
 * Lock the supplied queue so nobody else is updating it at the same
 * time as us.
 *
 * Implements a fair "ticket / now serving" spinlock: the ll/sc
 * sequence atomically takes a ticket, then the core waits until
 * now_serving reaches that ticket. When other tickets are ahead of
 * us, the inner loop spins on a register (delay of roughly
 * (tickets-in-front - 1) * 32 iterations, built with the Octeon
 * cins instruction) before re-reading now_serving, which keeps the
 * lock word out of heavy cache traffic while waiting.
 *
 * @queue_id: Queue ID to lock
 * @qptr: Pointer to the queue's global state
 */
static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
					 __cvmx_cmd_queue_state_t *qptr)
{
	extern __cvmx_cmd_queue_all_state_t
	    *__cvmx_cmd_queue_state_ptr;
	int tmp;
	int my_ticket;
	prefetch(qptr);
	asm volatile (
	    ".set push\n"
	    ".set noreorder\n"
	    "1:\n"
	    /* Atomic add one to ticket_ptr */
	    "ll %[my_ticket], %[ticket_ptr]\n"
	    /* and store the original value */
	    "li %[ticket], 1\n"
	    /* in my_ticket */
	    "baddu %[ticket], %[my_ticket]\n"
	    "sc %[ticket], %[ticket_ptr]\n"
	    /* sc wrote 0 on ll/sc failure: retry the ticket grab */
	    "beqz %[ticket], 1b\n"
	    " nop\n"
	    /* Load the current now_serving ticket */
	    "lbu %[ticket], %[now_serving]\n"
	    "2:\n"
	    /* Jump out if now_serving == my_ticket */
	    "beq %[ticket], %[my_ticket], 4f\n"
	    /* Find out how many tickets are in front of me */
	    " subu %[ticket], %[my_ticket], %[ticket]\n"
	    /* Use tickets in front of me minus one to delay */
	    "subu %[ticket], 1\n"
	    /* Delay will be ((tickets in front)-1)*32 loops */
	    "cins %[ticket], %[ticket], 5, 7\n"
	    "3:\n"
	    /* Loop here until our ticket might be up */
	    "bnez %[ticket], 3b\n"
	    " subu %[ticket], 1\n"
	    /* Jump back up to check out ticket again */
	    "b 2b\n"
	    /* Load the current now_serving ticket */
	    " lbu %[ticket], %[now_serving]\n"
	    "4:\n"
	    ".set pop\n" :
	    [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
	    [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
	    [my_ticket] "=r"(my_ticket)
	);
}
/**
 * Unlock the queue, flushing all writes.
 *
 * @qptr: Queue to unlock
 */
static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
{
	/* Advancing now_serving hands the lock to the next ticket
	   holder spinning in __cvmx_cmd_queue_lock(). */
	qptr->now_serving++;
	/* Memory barrier: make our queue updates globally visible
	   before the lock release is observed. */
	CVMX_SYNCWS;
}
  274. /**
  275. * Get the queue state structure for the given queue id
  276. *
  277. * @queue_id: Queue id to get
  278. *
  279. * Returns Queue structure or NULL on failure
  280. */
  281. static inline __cvmx_cmd_queue_state_t
  282. *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
  283. {
  284. extern __cvmx_cmd_queue_all_state_t
  285. *__cvmx_cmd_queue_state_ptr;
  286. return &__cvmx_cmd_queue_state_ptr->
  287. state[__cvmx_cmd_queue_get_index(queue_id)];
  288. }
/**
 * Write an arbitrary number of command words to a command queue.
 * This is a generic function; the fixed number of command word
 * functions yield higher performance.
 *
 * @queue_id: Hardware command queue to write to
 * @use_locking:
 *	Use internal locking to ensure exclusive access for queue
 *	updates. If you don't use this locking you must ensure
 *	exclusivity some other way. Locking is strongly recommended.
 * @cmd_count: Number of command words to write
 * @cmds: Array of commands to write
 *
 * NOTE(review): the overflow path copies the spill-over into a
 * single fresh buffer; it assumes cmd_count fits in one buffer --
 * TODO confirm callers bound cmd_count below the pool buffer size.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
							   queue_id,
							   int use_locking,
							   int cmd_count,
							   uint64_t *cmds)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	/* Make sure nobody else is updating the same queue */
	if (likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id, qptr);
	/*
	 * If a max queue length was specified then make sure we don't
	 * exceed it. If any part of the command would be below the
	 * limit we allow it.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
		if (unlikely
		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_FULL;
		}
	}
	/*
	 * Normally there is plenty of room in the current buffer for
	 * the command.
	 */
	if (likely(qptr->index + cmd_count < qptr->pool_size_m1)) {
		/* Recover the virtual address of the current buffer
		   from the packed physical base (stored >> 7). */
		uint64_t *ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		qptr->index += cmd_count;
		while (cmd_count--)
			*ptr++ = *cmds++;
	} else {
		uint64_t *ptr;
		int count;
		/*
		 * We need a new command buffer. Fail if there isn't
		 * one available.
		 */
		uint64_t *new_buffer =
		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
		if (unlikely(new_buffer == NULL)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		/*
		 * Figure out how many command words will fit in this
		 * buffer. One location will be needed for the next
		 * buffer pointer.
		 */
		count = qptr->pool_size_m1 - qptr->index;
		ptr += qptr->index;
		cmd_count -= count;
		while (count--)
			*ptr++ = *cmds++;
		/* Chain the old buffer to the new one with a physical
		   link pointer in the last used slot. */
		*ptr = cvmx_ptr_to_phys(new_buffer);
		/*
		 * The current buffer is full and has a link to the
		 * next buffer. Time to write the rest of the commands
		 * into the new buffer.
		 */
		qptr->base_ptr_div128 = *ptr >> 7;
		qptr->index = cmd_count;
		ptr = new_buffer;
		while (cmd_count--)
			*ptr++ = *cmds++;
	}
	/* All updates are complete. Release the lock and return */
	if (likely(use_locking))
		__cvmx_cmd_queue_unlock(qptr);
	return CVMX_CMD_QUEUE_SUCCESS;
}
/**
 * Simple function to write two command words to a command
 * queue.
 *
 * @queue_id: Hardware command queue to write to
 * @use_locking:
 *	Use internal locking to ensure exclusive access for queue
 *	updates. If you don't use this locking you must ensure
 *	exclusivity some other way. Locking is strongly recommended.
 * @cmd1: Command
 * @cmd2: Command
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
							    queue_id,
							    int use_locking,
							    uint64_t cmd1,
							    uint64_t cmd2)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	/* Make sure nobody else is updating the same queue */
	if (likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id, qptr);
	/*
	 * If a max queue length was specified then make sure we don't
	 * exceed it. If any part of the command would be below the
	 * limit we allow it.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
		if (unlikely
		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_FULL;
		}
	}
	/*
	 * Normally there is plenty of room in the current buffer for
	 * the command.
	 */
	if (likely(qptr->index + 2 < qptr->pool_size_m1)) {
		uint64_t *ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		qptr->index += 2;
		ptr[0] = cmd1;
		ptr[1] = cmd2;
	} else {
		uint64_t *ptr;
		/*
		 * Figure out how many command words will fit in this
		 * buffer. One location will be needed for the next
		 * buffer pointer.
		 */
		int count = qptr->pool_size_m1 - qptr->index;
		/*
		 * We need a new command buffer. Fail if there isn't
		 * one available.
		 */
		uint64_t *new_buffer =
		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
		if (unlikely(new_buffer == NULL)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		/* Reserve the last slot for the link pointer */
		count--;
		ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		*ptr++ = cmd1;
		/* cmd2 still fits in the old buffer unless cmd1 took
		   the final data slot (count == 0) */
		if (likely(count))
			*ptr++ = cmd2;
		*ptr = cvmx_ptr_to_phys(new_buffer);
		/*
		 * The current buffer is full and has a link to the
		 * next buffer. Time to write the rest of the commands
		 * into the new buffer.
		 */
		qptr->base_ptr_div128 = *ptr >> 7;
		qptr->index = 0;
		if (unlikely(count == 0)) {
			/* cmd2 spills into the fresh buffer */
			qptr->index = 1;
			new_buffer[0] = cmd2;
		}
	}
	/* All updates are complete. Release the lock and return */
	if (likely(use_locking))
		__cvmx_cmd_queue_unlock(qptr);
	return CVMX_CMD_QUEUE_SUCCESS;
}
/**
 * Simple function to write three command words to a command
 * queue.
 *
 * @queue_id: Hardware command queue to write to
 * @use_locking:
 *	Use internal locking to ensure exclusive access for queue
 *	updates. If you don't use this locking you must ensure
 *	exclusivity some other way. Locking is strongly recommended.
 * @cmd1: Command
 * @cmd2: Command
 * @cmd3: Command
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t
							    queue_id,
							    int use_locking,
							    uint64_t cmd1,
							    uint64_t cmd2,
							    uint64_t cmd3)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	/* Make sure nobody else is updating the same queue */
	if (likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id, qptr);
	/*
	 * If a max queue length was specified then make sure we don't
	 * exceed it. If any part of the command would be below the
	 * limit we allow it.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
		if (unlikely
		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_FULL;
		}
	}
	/*
	 * Normally there is plenty of room in the current buffer for
	 * the command.
	 */
	if (likely(qptr->index + 3 < qptr->pool_size_m1)) {
		uint64_t *ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		qptr->index += 3;
		ptr[0] = cmd1;
		ptr[1] = cmd2;
		ptr[2] = cmd3;
	} else {
		uint64_t *ptr;
		/*
		 * Figure out how many command words will fit in this
		 * buffer. One location will be needed for the next
		 * buffer pointer
		 */
		int count = qptr->pool_size_m1 - qptr->index;
		/*
		 * We need a new command buffer. Fail if there isn't
		 * one available
		 */
		uint64_t *new_buffer =
		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
		if (unlikely(new_buffer == NULL)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		/* Reserve the last slot for the link pointer */
		count--;
		ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		*ptr++ = cmd1;
		/* Write as many of cmd2/cmd3 as still fit before the
		   link slot */
		if (count) {
			*ptr++ = cmd2;
			if (count > 1)
				*ptr++ = cmd3;
		}
		*ptr = cvmx_ptr_to_phys(new_buffer);
		/*
		 * The current buffer is full and has a link to the
		 * next buffer. Time to write the rest of the commands
		 * into the new buffer.
		 */
		qptr->base_ptr_div128 = *ptr >> 7;
		qptr->index = 0;
		ptr = new_buffer;
		/* Spill whichever of cmd2/cmd3 did not fit above */
		if (count == 0) {
			*ptr++ = cmd2;
			qptr->index++;
		}
		if (count < 2) {
			*ptr++ = cmd3;
			qptr->index++;
		}
	}
	/* All updates are complete. Release the lock and return */
	if (likely(use_locking))
		__cvmx_cmd_queue_unlock(qptr);
	return CVMX_CMD_QUEUE_SUCCESS;
}
#endif /* __CVMX_CMD_QUEUE_H__ */