  1. /* QLogic qed NIC Driver
  2. *
  3. * Copyright (c) 2015 QLogic Corporation
  4. *
  5. * This software is available under the terms of the GNU General Public License
  6. * (GPL) Version 2, available from the file COPYING in the main directory of
  7. * this source tree.
  8. */
  9. #ifndef _QED_IF_H
  10. #define _QED_IF_H
  11. #include <linux/types.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/pci.h>
  15. #include <linux/skbuff.h>
  16. #include <linux/types.h>
  17. #include <asm/byteorder.h>
  18. #include <linux/io.h>
  19. #include <linux/compiler.h>
  20. #include <linux/kernel.h>
  21. #include <linux/list.h>
  22. #include <linux/slab.h>
  23. #include <linux/qed/common_hsi.h>
  24. #include <linux/qed/qed_chain.h>
/* DCBX (Data Center Bridging eXchange) protocol types for which the
 * device negotiates a per-protocol priority.
 */
enum dcbx_protocol_type {
	DCBX_PROTOCOL_ISCSI,
	DCBX_PROTOCOL_FCOE,
	DCBX_PROTOCOL_ROCE,
	DCBX_PROTOCOL_ROCE_V2,
	DCBX_PROTOCOL_ETH,
	DCBX_MAX_PROTOCOL_TYPE
};

/* NOTE(review): value 3 corresponds to DCBX_PROTOCOL_ROCE_V2 in the enum
 * above, not DCBX_PROTOCOL_ROCE - presumably indexes a FW/MFW table rather
 * than this enum; confirm against the management-FW HSI.
 */
#define QED_ROCE_PROTOCOL_INDEX (3)

#ifdef CONFIG_DCB
#define QED_LLDP_CHASSIS_ID_STAT_LEN 4	/* chassis-id length, in u32 words */
#define QED_LLDP_PORT_ID_STAT_LEN 4	/* port-id length, in u32 words */
#define QED_DCBX_MAX_APP_PROTOCOL 32	/* max DCBX APP-TLV table entries */
#define QED_MAX_PFC_PRIORITIES 8	/* 802.1p priorities 0..7 */
#define QED_DCBX_DSCP_SIZE 64		/* number of DSCP code points */
/* LLDP attributes learned from the link peer. */
struct qed_dcbx_lldp_remote {
	u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
	bool enable_rx;		/* peer LLDP reception enabled */
	bool enable_tx;		/* peer LLDP transmission enabled */
	u32 tx_interval;	/* peer LLDP tx interval; units per MFW HSI - verify */
	u32 max_credit;
};

/* LLDP attributes advertised by this port. */
struct qed_dcbx_lldp_local {
	u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
};

/* Resolved priority for each offload-capable protocol. */
struct qed_dcbx_app_prio {
	u8 roce;
	u8 roce_v2;
	u8 fcoe;
	u8 iscsi;
	u8 eth;
};
/* Priority Flow Control (PFC) parameters.
 * NOTE(review): the tag spells "dbcx" (sic) rather than "dcbx"; kept as-is
 * because external users reference this name.
 */
struct qed_dbcx_pfc_params {
	bool willing;				/* willing to accept peer config */
	bool enabled;
	u8 prio[QED_MAX_PFC_PRIORITIES];	/* per-priority PFC state */
	u8 max_tc;
};

/* IEEE APP-TLV selector-field types (how proto_id is interpreted). */
enum qed_dcbx_sf_ieee_type {
	QED_DCBX_SF_IEEE_ETHTYPE,
	QED_DCBX_SF_IEEE_TCP_PORT,
	QED_DCBX_SF_IEEE_UDP_PORT,
	QED_DCBX_SF_IEEE_TCP_UDP_PORT
};

/* A single DCBX application-priority table entry. */
struct qed_app_entry {
	bool ethtype;		/* presumably: proto_id is an ethertype rather
				 * than an L4 port - verify against dcbx code
				 */
	enum qed_dcbx_sf_ieee_type sf_ieee;
	bool enabled;
	u8 prio;		/* priority assigned to this protocol */
	u16 proto_id;		/* ethertype or L4 port number */
	enum dcbx_protocol_type proto_type;
};
/* Aggregate DCBX configuration: APP table, ETS and PFC. */
struct qed_dcbx_params {
	struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
	u16 num_app_entries;	/* number of valid entries in app_entry[] */
	bool app_willing;
	bool app_valid;
	bool app_error;
	bool ets_willing;
	bool ets_enabled;
	bool ets_cbs;		/* presumably credit-based shaper - verify */
	bool valid;		/* indicates validity of this structure */
	u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];	/* priority -> TC map */
	u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];	/* per-TC bandwidth share */
	u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];	/* per-TC TSA type */
	struct qed_dbcx_pfc_params pfc;
	u8 max_ets_tc;
};
/* Locally-administered (requested) DCBX configuration. */
struct qed_dcbx_admin_params {
	struct qed_dcbx_params params;
	bool valid;		/* indicates validity of params */
};

/* DCBX configuration received from the link peer. */
struct qed_dcbx_remote_params {
	struct qed_dcbx_params params;
	bool valid;		/* indicates validity of params */
};

/* Negotiated (operational) DCBX state. */
struct qed_dcbx_operational_params {
	struct qed_dcbx_app_prio app_prio;
	struct qed_dcbx_params params;
	bool valid;		/* indicates validity of params */
	bool enabled;
	bool ieee;		/* negotiated in IEEE mode */
	bool cee;		/* negotiated in CEE mode */
	u32 err;
};

/* Snapshot of all DCBX state handed to protocol drivers. */
struct qed_dcbx_get {
	struct qed_dcbx_operational_params operational;
	struct qed_dcbx_lldp_remote lldp_remote;
	struct qed_dcbx_lldp_local lldp_local;
	struct qed_dcbx_remote_params remote;
	struct qed_dcbx_admin_params local;
};
#endif /* CONFIG_DCB */
/* Modes accepted by the common set_led() operation. */
enum qed_led_mode {
	QED_LED_MODE_OFF,
	QED_LED_MODE_ON,
	QED_LED_MODE_RESTORE	/* restore default behavior - verify semantics with MFW */
};
  125. #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
  126. (void __iomem *)(reg_addr))
  127. #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
  128. #define QED_COALESCE_MAX 0xFF
  129. #define QED_DEFAULT_RX_USECS 12
/* forward */
struct qed_dev;

/* L2 (ethernet) personality PF parameters. */
struct qed_eth_pf_params {
	/* The following parameters are used during HW-init
	 * and these parameters need to be passed as arguments
	 * to update_pf_params routine invoked before slowpath start
	 */
	u16 num_cons;	/* number of connections to reserve for this PF */
};
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
/* iSCSI personality PF parameters; field semantics follow the FW
 * iSCSI / TCP HSI - see that document for units and valid ranges.
 */
struct qed_iscsi_pf_params {
	u64 glbl_q_params_addr;
	u64 bdq_pbl_base_addr[2];
	u32 max_cwnd;
	u16 cq_num_entries;
	u16 cmdq_num_entries;
	u16 dup_ack_threshold;
	u16 tx_sws_timer;
	u16 min_rto;
	u16 min_rto_rt;
	u16 max_rto;

	/* The following parameters are used during HW-init
	 * and these parameters need to be passed as arguments
	 * to update_pf_params routine invoked before slowpath start
	 */
	u16 num_cons;
	u16 num_tasks;

	/* The following parameters are used during protocol-init */
	u16 half_way_close_timeout;
	u16 bdq_xoff_threshold[2];
	u16 bdq_xon_threshold[2];
	u16 cmdq_xoff_threshold;
	u16 cmdq_xon_threshold;
	u16 rq_buffer_size;

	u8 num_sq_pages_in_ring;
	u8 num_r2tq_pages_in_ring;
	u8 num_uhq_pages_in_ring;
	u8 num_queues;
	u8 log_page_size;
	u8 rqe_log_size;
	u8 max_fin_rt;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 ll2_ooo_queue_id;
	u8 ooo_enable;
	u8 is_target;
	u8 bdq_pbl_num_entries[2];
};
/* RDMA (RoCE) personality PF parameters. */
struct qed_rdma_pf_params {
	/* Supplied to QED during resource allocation (may affect the ILT and
	 * the doorbell BAR).
	 */
	u32 min_dpis;		/* number of requested DPIs */
	u32 num_mrs;		/* number of requested memory regions */
	u32 num_qps;		/* number of requested Queue Pairs */
	u32 num_srqs;		/* number of requested SRQ */
	u8 roce_edpm_mode;	/* see QED_ROCE_EDPM_MODE_ENABLE */
	u8 gl_pi;		/* protocol index */

	/* Will allocate rate limiters to be used with QPs */
	u8 enable_dcqcn;
};

/* Per-protocol PF parameters; must be handed to qed via
 * update_pf_params() before slowpath_start().
 */
struct qed_pf_params {
	struct qed_eth_pf_params eth_pf_params;
	struct qed_iscsi_pf_params iscsi_pf_params;
	struct qed_rdma_pf_params rdma_pf_params;
};
/* Interrupt delivery modes supported by the device. */
enum qed_int_mode {
	QED_INT_MODE_INTA,
	QED_INT_MODE_MSIX,
	QED_INT_MODE_MSI,
	QED_INT_MODE_POLL,
};

/* Per-status-block bookkeeping shared between qed and protocol drivers. */
struct qed_sb_info {
	struct status_block *sb_virt;	/* virtual address of the SB */
	dma_addr_t sb_phys;		/* DMA address of the SB */
	u32 sb_ack;			/* Last given ack */
	u16 igu_sb_id;
	void __iomem *igu_addr;		/* IGU address used to ack this SB */
	u8 flags;
#define QED_SB_INFO_INIT 0x1		/* SB was initialized */
#define QED_SB_INFO_SETUP 0x2		/* SB was set up */

	struct qed_dev *cdev;
};
/* Static device information exported by qed to protocol drivers. */
struct qed_dev_info {
	unsigned long pci_mem_start;
	unsigned long pci_mem_end;
	unsigned int pci_irq;
	u8 num_hwfns;			/* number of HW functions on the device */
	u8 hw_mac[ETH_ALEN];
	bool is_mf_default;

	/* FW version */
	u16 fw_major;
	u16 fw_minor;
	u16 fw_rev;
	u16 fw_eng;

	/* MFW version */
	u32 mfw_rev;

	u32 flash_size;
	u8 mf_mode;			/* presumably an enum qed_mf_mode value - verify */
	bool tx_switching;
	bool rdma_supported;
};

/* Users a status block can be allocated for. */
enum qed_sb_type {
	QED_SB_TYPE_L2_QUEUE,
	QED_SB_TYPE_CNQ,
};

/* Protocol personalities qed can be probed for. */
enum qed_protocol {
	QED_PROTOCOL_ETH,
	QED_PROTOCOL_ISCSI,
};
/* Link-mode capability bits; names (including the mixed case) mirror the
 * corresponding ethtool link-mode bit names.
 */
enum qed_link_mode_bits {
	QED_LM_FIBRE_BIT = BIT(0),
	QED_LM_Autoneg_BIT = BIT(1),
	QED_LM_Asym_Pause_BIT = BIT(2),
	QED_LM_Pause_BIT = BIT(3),
	QED_LM_1000baseT_Half_BIT = BIT(4),
	QED_LM_1000baseT_Full_BIT = BIT(5),
	QED_LM_10000baseKR_Full_BIT = BIT(6),
	QED_LM_25000baseKR_Full_BIT = BIT(7),
	QED_LM_40000baseLR4_Full_BIT = BIT(8),
	QED_LM_50000baseKR2_Full_BIT = BIT(9),
	QED_LM_100000baseKR4_Full_BIT = BIT(10),
	QED_LM_COUNT = 11	/* number of bits defined above */
};
/* Link configuration overrides passed to set_link(). */
struct qed_link_params {
	bool link_up;		/* requested administrative link state */

	/* Which of the fields below override the current configuration */
#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
	u32 override_flags;
	bool autoneg;
	u32 adv_speeds;		/* NOTE(review): encoding not visible here - verify */
	u32 forced_speed;

	/* Pause configuration, QED_LINK_PAUSE_* bits */
#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
	u32 pause_config;

	/* Loopback configuration, QED_LINK_LOOPBACK_* bits */
#define QED_LINK_LOOPBACK_NONE BIT(0)
#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
#define QED_LINK_LOOPBACK_EXT BIT(3)
#define QED_LINK_LOOPBACK_MAC BIT(4)
	u32 loopback_mode;
};

/* Current link state as reported by get_link(). */
struct qed_link_output {
	bool link_up;

	/* In QED_LM_* defs */
	u32 supported_caps;
	u32 advertised_caps;
	u32 lp_caps;		/* link-partner advertised capabilities */

	u32 speed;		/* In Mb/s */
	u8 duplex;		/* In DUPLEX defs */
	u8 port;		/* In PORT defs */
	bool autoneg;
	u32 pause_config;
};
/* Parameters for the initial device probe. */
struct qed_probe_params {
	enum qed_protocol protocol;	/* personality the caller requests */
	u32 dp_module;			/* initial debug-module mask */
	u8 dp_level;			/* initial debug level */
	bool is_vf;			/* probing a virtual function */
};

#define QED_DRV_VER_STR_SIZE 12

/* Parameters for slowpath_start(). */
struct qed_slowpath_params {
	u32 int_mode;			/* presumably an enum qed_int_mode value - verify */
	u8 drv_major;			/* protocol-driver version components */
	u8 drv_minor;
	u8 drv_rev;
	u8 drv_eng;
	u8 name[QED_DRV_VER_STR_SIZE];
};

#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */

/* Fastpath interrupt information exported to the protocol driver. */
struct qed_int_info {
	struct msix_entry *msix;
	u8 msix_cnt;			/* number of vectors available */

	/* This should be updated by the protocol driver */
	u8 used_cnt;
};
/* Callbacks registered by the protocol driver, invoked by qed. */
struct qed_common_cb_ops {
	/* Called when the physical link state changes; @dev is the cookie
	 * the protocol driver registered.
	 */
	void (*link_update)(void *dev,
			    struct qed_link_output *link);
};

/* Device self-test operations. */
struct qed_selftest_ops {
/**
 * @brief selftest_interrupt - Perform interrupt test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_interrupt)(struct qed_dev *cdev);

/**
 * @brief selftest_memory - Perform memory test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_memory)(struct qed_dev *cdev);

/**
 * @brief selftest_register - Perform register test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_register)(struct qed_dev *cdev);

/**
 * @brief selftest_clock - Perform clock test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_clock)(struct qed_dev *cdev);
};
/* Common (protocol-agnostic) operations qed exports to protocol drivers. */
struct qed_common_ops {
	struct qed_selftest_ops *selftest;

	/* Probe the device and return a handle used by all other calls. */
	struct qed_dev* (*probe)(struct pci_dev *dev,
				 struct qed_probe_params *params);

	/* Tear down everything created by probe(). */
	void (*remove)(struct qed_dev *cdev);

	int (*set_power_state)(struct qed_dev *cdev,
			       pci_power_t state);

	/* Set the protocol-driver name/version strings used in logs. */
	void (*set_id)(struct qed_dev *cdev,
		       char name[],
		       char ver_str[]);

	/* Client drivers need to make this call before slowpath_start.
	 * PF params required for the call before slowpath_start is
	 * documented within the qed_pf_params structure definition.
	 */
	void (*update_pf_params)(struct qed_dev *cdev,
				 struct qed_pf_params *params);

	int (*slowpath_start)(struct qed_dev *cdev,
			      struct qed_slowpath_params *params);

	int (*slowpath_stop)(struct qed_dev *cdev);

	/* Requests to use `cnt' interrupts for fastpath.
	 * upon success, returns number of interrupts allocated for fastpath.
	 */
	int (*set_fp_int)(struct qed_dev *cdev,
			  u16 cnt);

	/* Fills `info' with pointers required for utilizing interrupts */
	int (*get_fp_int)(struct qed_dev *cdev,
			  struct qed_int_info *info);

	/* Initialize a status block for the given queue/user type. */
	u32 (*sb_init)(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr,
		       u16 sb_id,
		       enum qed_sb_type type);

	u32 (*sb_release)(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id);

	void (*simd_handler_config)(struct qed_dev *cdev,
				    void *token,
				    int index,
				    void (*handler)(void *));

	void (*simd_handler_clean)(struct qed_dev *cdev,
				   int index);

	/* Debug-data collection; size query then fill of `buffer'. */
	int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);

	int (*dbg_all_data_size) (struct qed_dev *cdev);

/**
 * @brief can_link_change - can the instance change the link or not
 *
 * @param cdev
 *
 * @return true if link-change is allowed, false otherwise.
 */
	bool (*can_link_change)(struct qed_dev *cdev);

/**
 * @brief set_link - set links according to params
 *
 * @param cdev
 * @param params - values used to override the default link configuration
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_link)(struct qed_dev *cdev,
			struct qed_link_params *params);

/**
 * @brief get_link - returns the current link state.
 *
 * @param cdev
 * @param if_link - structure to be filled with current link configuration.
 */
	void (*get_link)(struct qed_dev *cdev,
			 struct qed_link_output *if_link);

/**
 * @brief - drains chip in case Tx completions fail to arrive due to pause.
 *
 * @param cdev
 */
	int (*drain)(struct qed_dev *cdev);

/**
 * @brief update_msglvl - update module debug level
 *
 * @param cdev
 * @param dp_module
 * @param dp_level
 */
	void (*update_msglvl)(struct qed_dev *cdev,
			      u32 dp_module,
			      u8 dp_level);

	/* Allocate/free a HW chain (descriptor ring). */
	int (*chain_alloc)(struct qed_dev *cdev,
			   enum qed_chain_use_mode intended_use,
			   enum qed_chain_mode mode,
			   enum qed_chain_cnt_type cnt_type,
			   u32 num_elems,
			   size_t elem_size,
			   struct qed_chain *p_chain);

	void (*chain_free)(struct qed_dev *cdev,
			   struct qed_chain *p_chain);

/**
 * @brief get_coalesce - Get coalesce parameters in usec
 *
 * @param cdev
 * @param rx_coal - Rx coalesce value in usec
 * @param tx_coal - Tx coalesce value in usec
 *
 */
	void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);

/**
 * @brief set_coalesce - Configure Rx coalesce value in usec
 *
 * @param cdev
 * @param rx_coal - Rx coalesce value in usec
 * @param tx_coal - Tx coalesce value in usec
 * @param qid - Queue index
 * @param sb_id - Status Block Id
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    u8 qid, u16 sb_id);

/**
 * @brief set_led - Configure LED mode
 *
 * @param cdev
 * @param mode - LED mode
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_led)(struct qed_dev *cdev,
		       enum qed_led_mode mode);
};
  477. #define MASK_FIELD(_name, _value) \
  478. ((_value) &= (_name ## _MASK))
  479. #define FIELD_VALUE(_name, _value) \
  480. ((_value & _name ## _MASK) << _name ## _SHIFT)
  481. #define SET_FIELD(value, name, flag) \
  482. do { \
  483. (value) &= ~(name ## _MASK << name ## _SHIFT); \
  484. (value) |= (((u64)flag) << (name ## _SHIFT)); \
  485. } while (0)
  486. #define GET_FIELD(value, name) \
  487. (((value) >> (name ## _SHIFT)) & name ## _MASK)
  488. /* Debug print definitions */
  489. #define DP_ERR(cdev, fmt, ...) \
  490. pr_err("[%s:%d(%s)]" fmt, \
  491. __func__, __LINE__, \
  492. DP_NAME(cdev) ? DP_NAME(cdev) : "", \
  493. ## __VA_ARGS__) \
  494. #define DP_NOTICE(cdev, fmt, ...) \
  495. do { \
  496. if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
  497. pr_notice("[%s:%d(%s)]" fmt, \
  498. __func__, __LINE__, \
  499. DP_NAME(cdev) ? DP_NAME(cdev) : "", \
  500. ## __VA_ARGS__); \
  501. \
  502. } \
  503. } while (0)
  504. #define DP_INFO(cdev, fmt, ...) \
  505. do { \
  506. if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \
  507. pr_notice("[%s:%d(%s)]" fmt, \
  508. __func__, __LINE__, \
  509. DP_NAME(cdev) ? DP_NAME(cdev) : "", \
  510. ## __VA_ARGS__); \
  511. } \
  512. } while (0)
  513. #define DP_VERBOSE(cdev, module, fmt, ...) \
  514. do { \
  515. if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
  516. ((cdev)->dp_module & module))) { \
  517. pr_notice("[%s:%d(%s)]" fmt, \
  518. __func__, __LINE__, \
  519. DP_NAME(cdev) ? DP_NAME(cdev) : "", \
  520. ## __VA_ARGS__); \
  521. } \
  522. } while (0)
/* Debug verbosity levels; lower value = more verbose. */
enum DP_LEVEL {
	QED_LEVEL_VERBOSE = 0x0,
	QED_LEVEL_INFO = 0x1,
	QED_LEVEL_NOTICE = 0x2,
	QED_LEVEL_ERR = 0x3,
};

/* The module-param debug value packs the level into the top two bits
 * and the module mask into the rest.
 */
#define QED_LOG_LEVEL_SHIFT (30)
#define QED_LOG_VERBOSE_MASK (0x3fffffff)
#define QED_LOG_INFO_MASK (0x40000000)
#define QED_LOG_NOTICE_MASK (0x80000000)

/* Per-module debug-message mask bits. */
enum DP_MODULE {
	QED_MSG_SPQ = 0x10000,
	QED_MSG_STATS = 0x20000,
	QED_MSG_DCB = 0x40000,
	QED_MSG_IOV = 0x80000,
	QED_MSG_SP = 0x100000,
	QED_MSG_STORAGE = 0x200000,
	QED_MSG_CXT = 0x800000,
	QED_MSG_LL2 = 0x1000000,
	QED_MSG_ILT = 0x2000000,
	QED_MSG_RDMA = 0x4000000,
	QED_MSG_DEBUG = 0x8000000,
	/* to be added...up to 0x8000000 */
};

/* Multi-function operating modes. */
enum qed_mf_mode {
	QED_MF_DEFAULT,
	QED_MF_OVLAN,
	QED_MF_NPAR,
};
/* Aggregated L2 statistics exported to protocol drivers.
 * Counters above the "port" marker are per-PF; those below it are
 * port-level MAC/BRB counters.
 */
struct qed_eth_stats {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
};
  625. #define QED_SB_IDX 0x0002
  626. #define RX_PI 0
  627. #define TX_PI(tc) (RX_PI + 1 + tc)
  628. struct qed_sb_cnt_info {
  629. int sb_cnt;
  630. int sb_iov_cnt;
  631. int sb_free_blk;
  632. };
/* Re-read the status block's producer index and record it as acked.
 * Returns QED_SB_IDX if the producer advanced since the last ack,
 * 0 otherwise.
 */
static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
	u32 prod = 0;
	u16 rc = 0;

	/* Producer index lives in the DMA'd status block (little-endian). */
	prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
	       STATUS_BLOCK_PROD_INDEX_MASK;
	if (sb_info->sb_ack != prod) {
		sb_info->sb_ack = prod;
		rc |= QED_SB_IDX;
	}

	/* Let SB update */
	mmiowb();
	return rc;
}
/**
 *
 * @brief This function creates an update command for interrupts that is
 * written to the IGU.
 *
 * @param sb_info - This is the structure allocated and
 * initialized per status block. Assumption is
 * that it was initialized using qed_sb_init
 * @param int_cmd - Enable/Disable/Nop
 * @param upd_flg - whether igu consumer should be
 * updated.
 *
 * @return inline void
 */
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
			      enum igu_int_cmd int_cmd,
			      u8 upd_flg)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	/* Pack SB index, update flag, interrupt command and segment into
	 * a single doorbell word.
	 */
	igu_ack.sb_id_and_flags =
		((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_REG <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to same place address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	mmiowb();
	barrier();
}
  679. static inline void __internal_ram_wr(void *p_hwfn,
  680. void __iomem *addr,
  681. int size,
  682. u32 *data)
  683. {
  684. unsigned int i;
  685. for (i = 0; i < size / sizeof(*data); i++)
  686. DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
  687. }
/* Convenience wrapper around __internal_ram_wr() for callers that have
 * no hwfn context.
 */
static inline void internal_ram_wr(void __iomem *addr,
				   int size,
				   u32 *data)
{
	__internal_ram_wr(NULL, addr, size, data);
}
/* RSS hash-type capability bits. */
enum qed_rss_caps {
	QED_RSS_IPV4 = 0x1,
	QED_RSS_IPV6 = 0x2,
	QED_RSS_IPV4_TCP = 0x4,
	QED_RSS_IPV6_TCP = 0x8,
	QED_RSS_IPV4_UDP = 0x10,
	QED_RSS_IPV6_UDP = 0x20,
};

#define QED_RSS_IND_TABLE_SIZE 128	/* RSS indirection-table entries */
#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif /* _QED_IF_H */