/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
/* How the chain's pages are linked together in memory */
enum qed_chain_mode {
	/* Each Page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page; a next pointer is not required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list (the PBL) */
	QED_CHAIN_MODE_PBL,
};
/* Intended usage; determines whether the chain starts full or empty */
enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};
/* Width of the chain's counters; selects the chain16/pbl16 or
 * chain32/pbl32 member of the unions below.
 */
enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	QED_CHAIN_CNT_TYPE_U32,
};
/* Next-page pointer element, stored at the end of each page in
 * QED_CHAIN_MODE_NEXT_PTR chains (see qed_chain_init_next_ptr_elem).
 */
struct qed_chain_next {
	struct regpair next_phys;	/* physical address of the next page */
	void *next_virt;		/* virtual address of the next page */
};
/* PBL page cursors for 16-bit counter chains */
struct qed_chain_pbl_u16 {
	u16 prod_page_idx;
	u16 cons_page_idx;
};
/* PBL page cursors for 32-bit counter chains */
struct qed_chain_pbl_u32 {
	u32 prod_page_idx;
	u32 cons_page_idx;
};
/* Side-list (PBL) bookkeeping for QED_CHAIN_MODE_PBL chains */
struct qed_chain_pbl {
	/* Base address of a pre-allocated buffer for pbl */
	dma_addr_t p_phys_table;
	void *p_virt_table;

	/* Table for keeping the virtual addresses of the chain pages,
	 * respectively to the physical addresses in the pbl table.
	 */
	void **pp_virt_addr_tbl;

	/* Index to current used page by producer/consumer */
	union {
		struct qed_chain_pbl_u16 pbl16;
		struct qed_chain_pbl_u32 pbl32;
	} u;
};
struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
	u16 prod_idx;
	u16 cons_idx;
};
struct qed_chain_u32 {
	/* Cyclic index of next element to produce/consume */
	u32 prod_idx;
	u32 cons_idx;
};
/* Generic chain descriptor: holds the buffer addresses, the prod/cons
 * cursors and the pre-computed geometry used on the fast path.
 */
struct qed_chain {
	/* Virtual/physical base of the allocated chain buffer */
	void *p_virt_addr;
	dma_addr_t p_phys_addr;

	/* Fast-path pointers to the next element to produce/consume */
	void *p_prod_elem;
	void *p_cons_elem;

	enum qed_chain_mode mode;
	enum qed_chain_use_mode intended_use; /* used to produce/consume */
	enum qed_chain_cnt_type cnt_type;

	/* Active member selected by cnt_type */
	union {
		struct qed_chain_u16 chain16;
		struct qed_chain_u32 chain32;
	} u;

	u32 page_cnt;

	/* Number of elements - capacity is for usable elements only,
	 * while size will contain total number of elements [for entire chain].
	 */
	u32 capacity;
	u32 size;

	/* Elements information for fast calculations */
	u16 elem_per_page;
	u16 elem_per_page_mask;		/* elem_per_page - 1; assumes power of 2 */
	u16 elem_unusable;		/* elements reserved per page (next-ptr mode) */
	u16 usable_per_page;
	u16 elem_size;
	u16 next_page_mask;		/* in-page index at which to advance a page */

	struct qed_chain_pbl pbl;
};
  98. #define QED_CHAIN_PBL_ENTRY_SIZE (8)
  99. #define QED_CHAIN_PAGE_SIZE (0x1000)
  100. #define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))
  101. #define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
  102. ((mode == QED_CHAIN_MODE_NEXT_PTR) ? \
  103. (1 + ((sizeof(struct qed_chain_next) - 1) / \
  104. (elem_size))) : 0)
  105. #define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
  106. ((u32)(ELEMS_PER_PAGE(elem_size) - \
  107. UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
  108. #define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
  109. DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
  110. #define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
  111. #define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
  112. /* Accessors */
  113. static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
  114. {
  115. return p_chain->u.chain16.prod_idx;
  116. }
  117. static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
  118. {
  119. return p_chain->u.chain16.cons_idx;
  120. }
  121. static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
  122. {
  123. return p_chain->u.chain32.cons_idx;
  124. }
/* Return the number of free elements in a 16-bit counter chain.
 *
 * 'used' is the cyclic prod-cons distance; the 0x10000 bias keeps the
 * intermediate u32 subtraction non-negative when prod has wrapped past
 * cons, and the truncating cast back to u16 removes the bias again.
 * For next-ptr chains, prod/cons are also advanced over the per-page
 * reserved elements (see qed_chain_advance_page), so the page-count
 * difference between the two cursors is subtracted to compensate.
 */
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
	u16 used;

	used = (u16) (((u32)0x10000 +
		       (u32)p_chain->u.chain16.prod_idx) -
		      (u32)p_chain->u.chain16.cons_idx);
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
			p_chain->u.chain16.cons_idx / p_chain->elem_per_page;

	return (u16)(p_chain->capacity - used);
}
/* 32-bit counter variant of qed_chain_get_elem_left: same biased
 * wraparound arithmetic, performed in u64 with a 2^32 bias.
 */
static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
	u32 used;

	used = (u32) (((u64)0x100000000ULL +
		       (u64)p_chain->u.chain32.prod_idx) -
		      (u64)p_chain->u.chain32.cons_idx);
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
			p_chain->u.chain32.cons_idx / p_chain->elem_per_page;

	return p_chain->capacity - used;
}
  147. static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
  148. {
  149. return p_chain->usable_per_page;
  150. }
  151. static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
  152. {
  153. return p_chain->elem_unusable;
  154. }
  155. static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
  156. {
  157. return p_chain->page_cnt;
  158. }
  159. static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
  160. {
  161. return p_chain->pbl.p_phys_table;
  162. }
/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem	in/out: current element pointer; updated to the
 *			first element of the next page
 * @param idx_to_inc	pointer to the prod/cons index; points at a u16
 *			or a u32 depending on p_chain->cnt_type
 * @param page_to_inc	pointer to the PBL prod/cons page index (same
 *			width rule); only used in PBL mode
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		/* The current element position holds the next-page pointer;
		 * follow it, then advance the index over the elements the
		 * pointer occupies.
		 */
		p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += p_chain->elem_unusable;
		break;
	case QED_CHAIN_MODE_SINGLE:
		/* Single page: simply wrap back to the page start */
		*p_next_elem = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		/* Cyclically advance the page index, then look the new
		 * page's virtual address up in the side table.
		 */
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
	}
}
/* In the macros below, 'idx' is the *name* of a prod/cons index field
 * (prod_idx or cons_idx), token-pasted into the chain16/chain32 access;
 * it is not an index value.
 */
#define is_unusable_idx(p, idx) \
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx) \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx) \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

/* If the index has just reached the first reserved element of a page,
 * skip it past the elements occupied by the next-page pointer.
 */
#define test_and_skip(p, idx)						  \
	do {								  \
		if (is_chain_u16(p)) {					  \
			if (is_unusable_idx(p, idx))			  \
				(p)->u.chain16.idx += (p)->elem_unusable; \
		} else {						  \
			if (is_unusable_idx_u32(p, idx))		  \
				(p)->u.chain32.idx += (p)->elem_unusable; \
		}							  \
	} while (0)
  224. /**
  225. * @brief qed_chain_return_produced -
  226. *
  227. * A chain in which the driver "Produces" elements should use this API
  228. * to indicate previous produced elements are now consumed.
  229. *
  230. * @param p_chain
  231. */
  232. static inline void qed_chain_return_produced(struct qed_chain *p_chain)
  233. {
  234. if (is_chain_u16(p_chain))
  235. p_chain->u.chain16.cons_idx++;
  236. else
  237. p_chain->u.chain32.cons_idx++;
  238. test_and_skip(p_chain, cons_idx);
  239. }
/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It's driver
 * responsibility to validate that the chain has room for new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

	if (is_chain_u16(p_chain)) {
		/* If prod_idx sits on the page's last usable slot
		 * (next_page_mask), advance the element/page cursors to the
		 * next page before producing.
		 */
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain16.prod_idx++;
	} else {
		/* Same flow with 32-bit counters */
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain32.prod_idx++;
	}

	/* Hand out the current element and step the pointer past it */
	p_ret = p_chain->p_prod_elem;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return p_ret;
}
  278. /**
  279. * @brief qed_chain_get_capacity -
  280. *
  281. * Get the maximum number of BDs in chain
  282. *
  283. * @param p_chain
  284. * @param num
  285. *
  286. * @return number of unusable BDs
  287. */
  288. static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
  289. {
  290. return p_chain->capacity;
  291. }
  292. /**
  293. * @brief qed_chain_recycle_consumed -
  294. *
  295. * Returns an element which was previously consumed;
  296. * Increments producers so they could be written to FW.
  297. *
  298. * @param p_chain
  299. */
  300. static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
  301. {
  302. test_and_skip(p_chain, prod_idx);
  303. if (is_chain_u16(p_chain))
  304. p_chain->u.chain16.prod_idx++;
  305. else
  306. p_chain->u.chain32.prod_idx++;
  307. }
/**
 * @brief qed_chain_consume -
 *
 * A Chain in which the driver utilizes data written by a different source
 * (i.e., FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

	if (is_chain_u16(p_chain)) {
		/* If cons_idx sits on the page's last usable slot
		 * (next_page_mask), advance the element/page cursors to the
		 * next page before consuming.
		 */
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain16.cons_idx++;
	} else {
		/* Same flow with 32-bit counters */
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
	}

	/* Hand out the current element and step the pointer past it */
	p_ret = p_chain->p_cons_elem;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return p_ret;
}
/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	u32 i;

	/* Rewind both cursors to element 0 */
	if (is_chain_u16(p_chain)) {
		p_chain->u.chain16.prod_idx = 0;
		p_chain->u.chain16.cons_idx = 0;
	} else {
		p_chain->u.chain32.prod_idx = 0;
		p_chain->u.chain32.cons_idx = 0;
	}
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		/* Use (page_cnt - 1) as a reset value for the prod/cons page's
		 * indices, to avoid unnecessary page advancing on the first
		 * call to qed_chain_produce/consume. Instead, the indices
		 * will be advanced to page_cnt and then will be wrapped to 0.
		 */
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
			p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
		/* Do nothing */
		break;
	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements - a consume chain starts full, so
		 * advance prod over the entire capacity
		 */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;
	}
}
/**
 * @brief qed_chain_init_params - Initializes a basic chain struct
 *
 * Sets the fixed chain parameters and derived geometry; the chain and
 * PBL buffer addresses are cleared here and installed later via
 * qed_chain_init_mem()/qed_chain_init_pbl_mem().
 *
 * @param p_chain
 * @param page_cnt	number of pages in the allocated buffer
 * @param elem_size	size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type
 */
static inline void qed_chain_init_params(struct qed_chain *p_chain,
					 u32 page_cnt,
					 u8 elem_size,
					 enum qed_chain_use_mode intended_use,
					 enum qed_chain_mode mode,
					 enum qed_chain_cnt_type cnt_type)
{
	/* chain fixed parameters */
	p_chain->p_virt_addr = NULL;
	p_chain->p_phys_addr = 0;
	p_chain->elem_size = elem_size;
	p_chain->intended_use = intended_use;
	p_chain->mode = mode;
	p_chain->cnt_type = cnt_type;

	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
	/* mask arithmetic assumes elem_per_page is a power of two */
	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->next_page_mask = (p_chain->usable_per_page &
				   p_chain->elem_per_page_mask);

	p_chain->page_cnt = page_cnt;
	p_chain->capacity = p_chain->usable_per_page * page_cnt;
	p_chain->size = p_chain->elem_per_page * page_cnt;

	p_chain->pbl.p_phys_table = 0;
	p_chain->pbl.p_virt_table = NULL;
	p_chain->pbl.pp_virt_addr_tbl = NULL;
}
  427. /**
  428. * @brief qed_chain_init_mem -
  429. *
  430. * Initalizes a basic chain struct with its chain buffers
  431. *
  432. * @param p_chain
  433. * @param p_virt_addr virtual address of allocated buffer's beginning
  434. * @param p_phys_addr physical address of allocated buffer's beginning
  435. *
  436. */
  437. static inline void qed_chain_init_mem(struct qed_chain *p_chain,
  438. void *p_virt_addr, dma_addr_t p_phys_addr)
  439. {
  440. p_chain->p_virt_addr = p_virt_addr;
  441. p_chain->p_phys_addr = p_phys_addr;
  442. }
  443. /**
  444. * @brief qed_chain_init_pbl_mem -
  445. *
  446. * Initalizes a basic chain struct with its pbl buffers
  447. *
  448. * @param p_chain
  449. * @param p_virt_pbl pointer to a pre allocated side table which will hold
  450. * virtual page addresses.
  451. * @param p_phys_pbl pointer to a pre-allocated side table which will hold
  452. * physical page addresses.
  453. * @param pp_virt_addr_tbl
  454. * pointer to a pre-allocated side table which will hold
  455. * the virtual addresses of the chain pages.
  456. *
  457. */
  458. static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
  459. void *p_virt_pbl,
  460. dma_addr_t p_phys_pbl,
  461. void **pp_virt_addr_tbl)
  462. {
  463. p_chain->pbl.p_phys_table = p_phys_pbl;
  464. p_chain->pbl.p_virt_table = p_virt_pbl;
  465. p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
  466. }
  467. /**
  468. * @brief qed_chain_init_next_ptr_elem -
  469. *
  470. * Initalizes a next pointer element
  471. *
  472. * @param p_chain
  473. * @param p_virt_curr virtual address of a chain page of which the next
  474. * pointer element is initialized
  475. * @param p_virt_next virtual address of the next chain page
  476. * @param p_phys_next physical address of the next chain page
  477. *
  478. */
  479. static inline void
  480. qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
  481. void *p_virt_curr,
  482. void *p_virt_next, dma_addr_t p_phys_next)
  483. {
  484. struct qed_chain_next *p_next;
  485. u32 size;
  486. size = p_chain->elem_size * p_chain->usable_per_page;
  487. p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
  488. DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
  489. p_next->next_virt = p_virt_next;
  490. }
  491. /**
  492. * @brief qed_chain_get_last_elem -
  493. *
  494. * Returns a pointer to the last element of the chain
  495. *
  496. * @param p_chain
  497. *
  498. * @return void*
  499. */
  500. static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
  501. {
  502. struct qed_chain_next *p_next = NULL;
  503. void *p_virt_addr = NULL;
  504. u32 size, last_page_idx;
  505. if (!p_chain->p_virt_addr)
  506. goto out;
  507. switch (p_chain->mode) {
  508. case QED_CHAIN_MODE_NEXT_PTR:
  509. size = p_chain->elem_size * p_chain->usable_per_page;
  510. p_virt_addr = p_chain->p_virt_addr;
  511. p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
  512. while (p_next->next_virt != p_chain->p_virt_addr) {
  513. p_virt_addr = p_next->next_virt;
  514. p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
  515. size);
  516. }
  517. break;
  518. case QED_CHAIN_MODE_SINGLE:
  519. p_virt_addr = p_chain->p_virt_addr;
  520. break;
  521. case QED_CHAIN_MODE_PBL:
  522. last_page_idx = p_chain->page_cnt - 1;
  523. p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
  524. break;
  525. }
  526. /* p_virt_addr points at this stage to the last page of the chain */
  527. size = p_chain->elem_size * (p_chain->usable_per_page - 1);
  528. p_virt_addr = (u8 *)p_virt_addr + size;
  529. out:
  530. return p_virt_addr;
  531. }
  532. /**
  533. * @brief qed_chain_set_prod - sets the prod to the given value
  534. *
  535. * @param prod_idx
  536. * @param p_prod_elem
  537. */
  538. static inline void qed_chain_set_prod(struct qed_chain *p_chain,
  539. u32 prod_idx, void *p_prod_elem)
  540. {
  541. if (is_chain_u16(p_chain))
  542. p_chain->u.chain16.prod_idx = (u16) prod_idx;
  543. else
  544. p_chain->u.chain32.prod_idx = prod_idx;
  545. p_chain->p_prod_elem = p_prod_elem;
  546. }
  547. /**
  548. * @brief qed_chain_pbl_zero_mem - set chain memory to 0
  549. *
  550. * @param p_chain
  551. */
  552. static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
  553. {
  554. u32 i, page_cnt;
  555. if (p_chain->mode != QED_CHAIN_MODE_PBL)
  556. return;
  557. page_cnt = qed_chain_get_page_cnt(p_chain);
  558. for (i = 0; i < page_cnt; i++)
  559. memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
  560. QED_CHAIN_PAGE_SIZE);
  561. }
#endif