/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_RDMA_IF_H
#define _QED_RDMA_IF_H
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <linux/qed/rdma_common.h>

#define QED_RDMA_MAX_CNQ_SIZE	(0xFFFF)

/* rdma interface */

enum qed_roce_qp_state {
	QED_ROCE_QP_STATE_RESET,
	QED_ROCE_QP_STATE_INIT,
	QED_ROCE_QP_STATE_RTR,
	QED_ROCE_QP_STATE_RTS,
	QED_ROCE_QP_STATE_SQD,
	QED_ROCE_QP_STATE_ERR,
	QED_ROCE_QP_STATE_SQE
};

enum qed_rdma_qp_type {
	QED_RDMA_QP_TYPE_RC,
	QED_RDMA_QP_TYPE_XRC_INI,
	QED_RDMA_QP_TYPE_XRC_TGT,
	QED_RDMA_QP_TYPE_INVAL = 0xffff,
};

enum qed_rdma_tid_type {
	QED_RDMA_TID_REGISTERED_MR,
	QED_RDMA_TID_FMR,
	QED_RDMA_TID_MW
};

struct qed_rdma_events {
	void *context;
	void (*affiliated_event)(void *context, u8 fw_event_code,
				 void *fw_handle);
	void (*unaffiliated_event)(void *context, u8 event_code);
};
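
/* Illustrative sketch only (not part of this interface): a protocol driver
 * typically supplies the two callbacks above and hands them to the core
 * through qed_rdma_start_in_params::events, e.g.:
 *
 *	static void my_affiliated_event(void *context, u8 fw_event_code,
 *					void *fw_handle)
 *	{
 *		// map fw_event_code/fw_handle to a CQ/QP/SRQ async event
 *	}
 *
 *	static void my_unaffiliated_event(void *context, u8 event_code)
 *	{
 *		// port-level events not tied to a specific object
 *	}
 *
 *	struct qed_rdma_events my_events = {
 *		.context = my_dev,
 *		.affiliated_event = my_affiliated_event,
 *		.unaffiliated_event = my_unaffiliated_event,
 *	};
 *
 * The my_* names are placeholders, not symbols defined by qed.
 */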

struct qed_rdma_device {
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u64 fw_ver;

	u64 node_guid;
	u64 sys_image_guid;

	u8 max_cnq;
	u8 max_sge;
	u8 max_srq_sge;
	u16 max_inline;
	u32 max_wqe;
	u32 max_srq_wqe;
	u8 max_qp_resp_rd_atomic_resc;
	u8 max_qp_req_rd_atomic_resc;
	u64 max_dev_resp_rd_atomic_resc;
	u32 max_cq;
	u32 max_qp;
	u32 max_srq;
	u32 max_mr;
	u64 max_mr_size;
	u32 max_cqe;
	u32 max_mw;
	u32 max_mr_mw_fmr_pbl;
	u64 max_mr_mw_fmr_size;
	u32 max_pd;
	u32 max_ah;
	u8 max_pkey;
	u16 max_srq_wr;
	u8 max_stats_queues;
	u32 dev_caps;

	/* Ability to support RNR-NAK generation */

#define QED_RDMA_DEV_CAP_RNR_NAK_MASK			0x1
#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
	/* Ability to support shutdown port */
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK		0x1
#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT		1
	/* Ability to support port active event */
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT	2
	/* Ability to support port change event */
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT	3
	/* Ability to support system image GUID */
#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT		4
	/* Ability to support a bad P_Key counter */
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK		0x1
#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT		5
	/* Ability to support atomic operations */
#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT		6
#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT		7
	/* Ability to support modifying the maximum number of
	 * outstanding work requests per QP
	 */
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK		0x1
#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT		8
	/* Ability to support automatic path migration */
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK		0x1
#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT		9
	/* Ability to support the base memory management extensions */
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK		0x1
#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK		0x1
#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT		11
	/* Ability to support multiple page sizes per memory region */
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK	0x1
#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT	12
	/* Ability to support a block list physical buffer list */
#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK		0x1
#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT		13
	/* Ability to support zero-based virtual addresses */
#define QED_RDMA_DEV_CAP_ZBVA_MASK			0x1
#define QED_RDMA_DEV_CAP_ZBVA_SHIFT			14
	/* Ability to support local invalidate fencing */
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK		0x1
#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
	/* Ability to support loopback on QP */
#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK		0x1
#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT		16
	u64 page_size_caps;
	u8 dev_ack_delay;
	u32 reserved_lkey;
	u32 bad_pkey_counter;
	struct qed_rdma_events events;
};
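
/* Illustrative sketch only: dev_caps is a bit field, and each MASK/SHIFT
 * pair above is meant to be used together to test one capability, e.g.
 * (assuming a previously queried struct qed_rdma_device *dev):
 *
 *	bool atomic_ops_supported =
 *		(dev->dev_caps >> QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT) &
 *		QED_RDMA_DEV_CAP_ATOMIC_OP_MASK;
 *
 * Equivalent GET_FIELD()-style helpers may be used where available.
 */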

enum qed_port_state {
	QED_RDMA_PORT_UP,
	QED_RDMA_PORT_DOWN,
};

enum qed_roce_capability {
	QED_ROCE_V1 = 1 << 0,
	QED_ROCE_V2 = 1 << 1,
};

struct qed_rdma_port {
	enum qed_port_state port_state;
	int link_speed;
	u64 max_msg_size;
	u8 source_gid_table_len;
	void *source_gid_table_ptr;
	u8 pkey_table_len;
	void *pkey_table_ptr;
	u32 pkey_bad_counter;
	enum qed_roce_capability capability;
};

struct qed_rdma_cnq_params {
	u8 num_pbl_pages;
	u64 pbl_ptr;
};

/* The CQ mode affects the CQ doorbell transaction size.
 * 64-bit/32-bit machines should configure 32/16 bits, respectively.
 */
enum qed_rdma_cq_mode {
	QED_RDMA_CQ_MODE_16_BITS,
	QED_RDMA_CQ_MODE_32_BITS,
};

struct qed_roce_dcqcn_params {
	u8 notification_point;
	u8 reaction_point;

	/* fields for notification point */
	u32 cnp_send_timeout;

	/* fields for reaction point */
	u32 rl_bc_rate;
	u16 rl_max_rate;
	u16 rl_r_ai;
	u16 rl_r_hai;
	u16 dcqcn_g;
	u32 dcqcn_k_us;
	u32 dcqcn_timeout_us;
};

struct qed_rdma_start_in_params {
	struct qed_rdma_events *events;
	struct qed_rdma_cnq_params cnq_pbl_list[128];
	u8 desired_cnq;
	enum qed_rdma_cq_mode cq_mode;
	struct qed_roce_dcqcn_params dcqcn_params;
	u16 max_mtu;
	u8 mac_addr[ETH_ALEN];
	u8 iwarp_flags;
};
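
/* Illustrative sketch only: a consumer starting the RDMA engine might fill
 * the structure above along these lines (my_* names are placeholders). Per
 * the qed_rdma_cq_mode comment, 64-bit machines normally pick the 32-bit
 * doorbell mode:
 *
 *	struct qed_rdma_start_in_params in_params = { 0 };
 *
 *	in_params.events = &my_events;
 *	in_params.desired_cnq = my_num_cnqs;
 *	in_params.cq_mode = (BITS_PER_LONG == 64) ?
 *			    QED_RDMA_CQ_MODE_32_BITS :
 *			    QED_RDMA_CQ_MODE_16_BITS;
 *	in_params.max_mtu = my_netdev->mtu;
 *	ether_addr_copy(in_params.mac_addr, my_netdev->dev_addr);
 *	// cnq_pbl_list[i] describes the PBL backing each requested CNQ
 *
 * The filled structure is then passed to the rdma_init op declared in
 * struct qed_rdma_ops below.
 */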

struct qed_rdma_add_user_out_params {
	u16 dpi;
	void __iomem *dpi_addr;
	u64 dpi_phys_addr;
	u32 dpi_size;
	u16 wid_count;
};

enum roce_mode {
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

union qed_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};

struct qed_rdma_register_tid_in_params {
	u32 itid;
	enum qed_rdma_tid_type tid_type;
	u8 key;
	u16 pd;
	bool local_read;
	bool local_write;
	bool remote_read;
	bool remote_write;
	bool remote_atomic;
	bool mw_bind;
	u64 pbl_ptr;
	bool pbl_two_level;
	u8 pbl_page_size_log;
	u8 page_size_log;
	u64 length;
	u64 vaddr;
	bool phy_mr;
	bool dma_mr;

	bool dif_enabled;
	u64 dif_error_addr;
};
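
/* Illustrative sketch only: memory registration is a two-step operation,
 * first reserving a TID and then registering it with a description of the
 * memory. The ops table comes from qed_get_rdma_ops() (declared at the end
 * of this file); my_* names are placeholders:
 *
 *	struct qed_rdma_register_tid_in_params tid_params = { 0 };
 *	u32 itid;
 *
 *	if (ops->rdma_alloc_tid(rdma_cxt, &itid))
 *		return -ENOMEM;
 *
 *	tid_params.itid = itid;
 *	tid_params.tid_type = QED_RDMA_TID_REGISTERED_MR;
 *	tid_params.pd = my_pd;
 *	tid_params.local_read = true;
 *	tid_params.local_write = true;
 *	tid_params.pbl_ptr = my_pbl_dma_addr;
 *	tid_params.page_size_log = PAGE_SHIFT;
 *	tid_params.length = my_len;
 *	tid_params.vaddr = my_va;
 *
 *	rc = ops->rdma_register_tid(rdma_cxt, &tid_params);
 */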

struct qed_rdma_create_cq_in_params {
	u32 cq_handle_lo;
	u32 cq_handle_hi;
	u32 cq_size;
	u16 dpi;
	bool pbl_two_level;
	u64 pbl_ptr;
	u16 pbl_num_pages;
	u8 pbl_page_size_log;
	u8 cnq_id;
	u16 int_timeout;
};

struct qed_rdma_create_srq_in_params {
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;

	/* XRC related only */
	bool reserved_key_en;
	bool is_xrc;
	u32 cq_cid;
	u16 xrcd_id;
};

struct qed_rdma_destroy_cq_in_params {
	u16 icid;
};

struct qed_rdma_destroy_cq_out_params {
	u16 num_cq_notif;
};

struct qed_rdma_create_qp_in_params {
	u32 qp_handle_lo;
	u32 qp_handle_hi;
	u32 qp_handle_async_lo;
	u32 qp_handle_async_hi;
	bool use_srq;
	bool signal_all;
	bool fmr_and_reserved_lkey;
	u16 pd;
	u16 dpi;
	u16 sq_cq_id;
	u16 sq_num_pages;
	u64 sq_pbl_ptr;
	u8 max_sq_sges;
	u16 rq_cq_id;
	u16 rq_num_pages;
	u64 rq_pbl_ptr;
	u16 srq_id;
	u16 xrcd_id;
	u8 stats_queue;
	enum qed_rdma_qp_type qp_type;
	u8 flags;
#define QED_ROCE_EDPM_MODE_MASK		0x1
#define QED_ROCE_EDPM_MODE_SHIFT	0
};

struct qed_rdma_create_qp_out_params {
	u32 qp_id;
	u16 icid;
	void *rq_pbl_virt;
	dma_addr_t rq_pbl_phys;
	void *sq_pbl_virt;
	dma_addr_t sq_pbl_phys;
};

struct qed_rdma_modify_qp_in_params {
	u32 modify_flags;
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK			0x1
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT		0
#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT			1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK		0x1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT		2
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT			3
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT		4
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT			5
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT			6
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK		0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT	7
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK	0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT	8
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT		9
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT		10
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT		11
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK		0x1
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT	12
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK	0x1
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT	13
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK			0x1
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT		14

	enum qed_roce_qp_state new_state;
	u16 pkey;
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;
	u32 dest_qp;
	bool lb_indication;
	u16 mtu;
	u8 traffic_class_tos;
	u8 hop_limit_ttl;
	u32 flow_label;
	union qed_gid sgid;
	union qed_gid dgid;
	u16 udp_src_port;

	u16 vlan_id;

	u32 rq_psn;
	u32 sq_psn;
	u8 max_rd_atomic_resp;
	u8 max_rd_atomic_req;
	u32 ack_timeout;
	u8 retry_cnt;
	u8 rnr_retry_cnt;
	u8 min_rnr_nak_timer;
	bool sqd_async;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
	bool use_local_mac;
	enum roce_mode roce_mode;
};
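
/* Illustrative sketch only: modify_flags selects which of the fields above
 * are valid for a given modify-QP call. A transition to RTR, for example,
 * would typically set (among others):
 *
 *	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
 *
 *	qp_params.new_state = QED_ROCE_QP_STATE_RTR;
 *	qp_params.modify_flags |= QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
 *				  QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT;
 *	qp_params.dest_qp = my_remote_qpn;
 *	qp_params.modify_flags |= QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK <<
 *				  QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT;
 *	qp_params.rq_psn = my_remote_psn;
 *	qp_params.modify_flags |= QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK <<
 *				  QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT;
 *
 * SET_FIELD()-style helpers may be used instead where available; the my_*
 * names are placeholders.
 */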

struct qed_rdma_query_qp_out_params {
	enum qed_roce_qp_state state;
	u32 rq_psn;
	u32 sq_psn;
	bool draining;
	u16 mtu;
	u32 dest_qp;
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;
	union qed_gid sgid;
	union qed_gid dgid;
	u32 flow_label;
	u8 hop_limit_ttl;
	u8 traffic_class_tos;
	u32 timeout;
	u8 rnr_retry;
	u8 retry_cnt;
	u8 min_rnr_nak_timer;
	u16 pkey_index;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	bool sqd_async;
};

struct qed_rdma_create_srq_out_params {
	u16 srq_id;
};

struct qed_rdma_destroy_srq_in_params {
	u16 srq_id;
	bool is_xrc;
};

struct qed_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
	bool is_xrc;
};

struct qed_rdma_stats_out_params {
	u64 sent_bytes;
	u64 sent_pkts;
	u64 rcv_bytes;
	u64 rcv_pkts;
};

struct qed_rdma_counters_out_params {
	u64 pd_count;
	u64 max_pd;
	u64 dpi_count;
	u64 max_dpi;
	u64 cq_count;
	u64 max_cq;
	u64 qp_count;
	u64 max_qp;
	u64 tid_count;
	u64 max_tid;
};

#define QED_ROCE_TX_HEAD_FAILURE	(1)
#define QED_ROCE_TX_FRAG_FAILURE	(2)

enum qed_iwarp_event_type {
	QED_IWARP_EVENT_MPA_REQUEST,	  /* Passive side request received */
	QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */
	QED_IWARP_EVENT_ACTIVE_COMPLETE,  /* Active side reply received */
	QED_IWARP_EVENT_DISCONNECT,
	QED_IWARP_EVENT_CLOSE,
	QED_IWARP_EVENT_IRQ_FULL,
	QED_IWARP_EVENT_RQ_EMPTY,
	QED_IWARP_EVENT_LLP_TIMEOUT,
	QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	QED_IWARP_EVENT_CQ_OVERFLOW,
	QED_IWARP_EVENT_QP_CATASTROPHIC,
	QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
	QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	QED_IWARP_EVENT_TERMINATE_RECEIVED,
	QED_IWARP_EVENT_SRQ_LIMIT,
	QED_IWARP_EVENT_SRQ_EMPTY,
};

enum qed_tcp_ip_version {
	QED_TCP_IPV4,
	QED_TCP_IPV6,
};

struct qed_iwarp_cm_info {
	enum qed_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	u8 ord;
	u8 ird;
	u16 private_data_len;
	const void *private_data;
};

struct qed_iwarp_cm_event_params {
	enum qed_iwarp_event_type event;
	const struct qed_iwarp_cm_info *cm_info;
	void *ep_context;	/* To be passed to accept call */
	int status;
};

typedef int (*iwarp_event_handler) (void *context,
				    struct qed_iwarp_cm_event_params *event);

struct qed_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;
	struct qed_rdma_qp *qp;
	struct qed_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
};

struct qed_iwarp_connect_out {
	void *ep_context;
};

struct qed_iwarp_listen_in {
	iwarp_event_handler event_cb;
	void *cb_context;	/* passed to event_cb */
	u32 max_backlog;
	enum qed_tcp_ip_version ip_version;
	u32 ip_addr[4];
	u16 port;
	u16 vlan;
};

struct qed_iwarp_listen_out {
	void *handle;
};

struct qed_iwarp_accept_in {
	void *ep_context;
	void *cb_context;
	struct qed_rdma_qp *qp;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct qed_iwarp_reject_in {
	void *ep_context;
	void *cb_context;
	const void *private_data;
	u16 private_data_len;
};

struct qed_iwarp_send_rtr_in {
	void *ep_context;
};
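
/* Illustrative sketch only: the iWARP connection-management structures
 * above are used in a passive-side (listen/accept) flow roughly as follows,
 * where ops is the table returned by qed_get_rdma_ops() and my_* names are
 * placeholders:
 *
 *	struct qed_iwarp_listen_in li = {
 *		.event_cb = my_event_cb,
 *		.cb_context = my_dev,
 *		.max_backlog = 8,
 *		.ip_version = QED_TCP_IPV4,
 *		.ip_addr = { my_ipv4_addr },
 *		.port = my_port,
 *	};
 *	struct qed_iwarp_listen_out lo;
 *
 *	rc = ops->iwarp_create_listen(rdma_cxt, &li, &lo);
 *
 * When my_event_cb() later fires with QED_IWARP_EVENT_MPA_REQUEST, the
 * consumer fills a struct qed_iwarp_accept_in with the ep_context carried
 * by the event and calls ops->iwarp_accept(), or turns the request down
 * with ops->iwarp_reject().
 */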

struct qed_roce_ll2_header {
	void *vaddr;
	dma_addr_t baddr;
	size_t len;
};

struct qed_roce_ll2_buffer {
	dma_addr_t baddr;
	size_t len;
};

struct qed_roce_ll2_packet {
	struct qed_roce_ll2_header header;
	int n_seg;
	struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
	int roce_mode;
	enum qed_ll2_tx_dest tx_dest;
};

enum qed_rdma_type {
	QED_RDMA_TYPE_ROCE,
	QED_RDMA_TYPE_IWARP
};

struct qed_dev_rdma_info {
	struct qed_dev_info common;
	enum qed_rdma_type rdma_type;
	u8 user_dpm_enabled;
};

struct qed_rdma_ops {
	const struct qed_common_ops *common;

	int (*fill_dev_info)(struct qed_dev *cdev,
			     struct qed_dev_rdma_info *info);
	void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);

	int (*rdma_init)(struct qed_dev *dev,
			 struct qed_rdma_start_in_params *iparams);

	int (*rdma_add_user)(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *oparams);

	void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
	int (*rdma_stop)(void *rdma_cxt);
	struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
	struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
	int (*rdma_get_start_sb)(struct qed_dev *cdev);
	int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
	void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
	int (*rdma_get_rdma_int)(struct qed_dev *cdev,
				 struct qed_int_info *info);
	int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
	int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
	void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
	int (*rdma_alloc_xrcd)(void *rdma_cxt, u16 *xrcd);
	void (*rdma_dealloc_xrcd)(void *rdma_cxt, u16 xrcd);
	int (*rdma_create_cq)(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid);
	int (*rdma_destroy_cq)(void *rdma_cxt,
			       struct qed_rdma_destroy_cq_in_params *iparams,
			       struct qed_rdma_destroy_cq_out_params *oparams);
	struct qed_rdma_qp *
	(*rdma_create_qp)(void *rdma_cxt,
			  struct qed_rdma_create_qp_in_params *iparams,
			  struct qed_rdma_create_qp_out_params *oparams);

	int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *iparams);

	int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *oparams);
	int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);

	int
	(*rdma_register_tid)(void *rdma_cxt,
			     struct qed_rdma_register_tid_in_params *iparams);

	int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
	int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
	void (*rdma_free_tid)(void *rdma_cxt, u32 itid);

	int (*rdma_create_srq)(void *rdma_cxt,
			       struct qed_rdma_create_srq_in_params *iparams,
			       struct qed_rdma_create_srq_out_params *oparams);
	int (*rdma_destroy_srq)(void *rdma_cxt,
				struct qed_rdma_destroy_srq_in_params *iparams);
	int (*rdma_modify_srq)(void *rdma_cxt,
			       struct qed_rdma_modify_srq_in_params *iparams);

	int (*ll2_acquire_connection)(void *rdma_cxt,
				      struct qed_ll2_acquire_data *data);

	int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle);
	int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle);
	void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle);

	int (*ll2_prepare_tx_packet)(void *rdma_cxt,
				     u8 connection_handle,
				     struct qed_ll2_tx_pkt_info *pkt,
				     bool notify_fw);

	int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt,
					     u8 connection_handle,
					     dma_addr_t addr,
					     u16 nbytes);
	int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle,
				  dma_addr_t addr, u16 buf_len, void *cookie,
				  u8 notify_fw);
	int (*ll2_get_stats)(void *rdma_cxt,
			     u8 connection_handle,
			     struct qed_ll2_stats *p_stats);
	int (*ll2_set_mac_filter)(struct qed_dev *cdev,
				  u8 *old_mac_address,
				  const u8 *new_mac_address);

	int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset);

	int (*iwarp_connect)(void *rdma_cxt,
			     struct qed_iwarp_connect_in *iparams,
			     struct qed_iwarp_connect_out *oparams);

	int (*iwarp_create_listen)(void *rdma_cxt,
				   struct qed_iwarp_listen_in *iparams,
				   struct qed_iwarp_listen_out *oparams);

	int (*iwarp_accept)(void *rdma_cxt,
			    struct qed_iwarp_accept_in *iparams);

	int (*iwarp_reject)(void *rdma_cxt,
			    struct qed_iwarp_reject_in *iparams);

	int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);

	int (*iwarp_send_rtr)(void *rdma_cxt,
			      struct qed_iwarp_send_rtr_in *iparams);
};
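
/* Illustrative sketch only: a protocol driver binds to the qed core by
 * fetching this ops table and querying device information before starting
 * RDMA (cdev and the my_* names are placeholders):
 *
 *	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
 *	struct qed_dev_rdma_info info;
 *	void *rdma_cxt;
 *	u16 my_pd;
 *
 *	if (!ops || ops->fill_dev_info(cdev, &info))
 *		return -EINVAL;
 *
 *	rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
 *	// then ops->rdma_init(cdev, &in_params), ops->rdma_add_user(),
 *	// ops->rdma_alloc_pd(rdma_cxt, &my_pd), and so on.
 */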

const struct qed_rdma_ops *qed_get_rdma_ops(void);

#endif