
TOMOYO Linux Cross Reference
Linux/net/rds/ib.h

Diff markup

Differences between /net/rds/ib.h (Version linux-6.12-rc7) and /net/rds/ib.h (Version linux-4.9.337)


  1 /* SPDX-License-Identifier: GPL-2.0 */         << 
  2 #ifndef _RDS_IB_H                                   1 #ifndef _RDS_IB_H
  3 #define _RDS_IB_H                                   2 #define _RDS_IB_H
  4                                                     3 
  5 #include <rdma/ib_verbs.h>                          4 #include <rdma/ib_verbs.h>
  6 #include <rdma/rdma_cm.h>                           5 #include <rdma/rdma_cm.h>
  7 #include <linux/interrupt.h>                        6 #include <linux/interrupt.h>
  8 #include <linux/pci.h>                              7 #include <linux/pci.h>
  9 #include <linux/slab.h>                             8 #include <linux/slab.h>
 10 #include "rds.h"                                    9 #include "rds.h"
 11 #include "rdma_transport.h"                        10 #include "rdma_transport.h"
 12                                                    11 
 13 #define RDS_IB_MAX_SGE                  8          12 #define RDS_IB_MAX_SGE                  8
 14 #define RDS_IB_RECV_SGE                 2          13 #define RDS_IB_RECV_SGE                 2
 15                                                    14 
 16 #define RDS_IB_DEFAULT_RECV_WR          1024       15 #define RDS_IB_DEFAULT_RECV_WR          1024
 17 #define RDS_IB_DEFAULT_SEND_WR          256        16 #define RDS_IB_DEFAULT_SEND_WR          256
 18 #define RDS_IB_DEFAULT_FR_WR            512        17 #define RDS_IB_DEFAULT_FR_WR            512
 19                                                    18 
 20 #define RDS_IB_DEFAULT_RETRY_COUNT      1      !!  19 #define RDS_IB_DEFAULT_RETRY_COUNT      2
 21                                                    20 
 22 #define RDS_IB_SUPPORTED_PROTOCOLS      0x0000     21 #define RDS_IB_SUPPORTED_PROTOCOLS      0x00000003      /* minor versions supported */
 23                                                    22 
 24 #define RDS_IB_RECYCLE_BATCH_COUNT      32         23 #define RDS_IB_RECYCLE_BATCH_COUNT      32
 25                                                    24 
 26 #define RDS_IB_WC_MAX                   32         25 #define RDS_IB_WC_MAX                   32
 27                                                    26 
 28 extern struct rw_semaphore rds_ib_devices_lock     27 extern struct rw_semaphore rds_ib_devices_lock;
 29 extern struct list_head rds_ib_devices;            28 extern struct list_head rds_ib_devices;
 30                                                    29 
 31 /*                                                 30 /*
 32  * IB posts RDS_FRAG_SIZE fragments of pages t     31  * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 33  * try and minimize the amount of memory tied      32  * try and minimize the amount of memory tied up both the device and
 34  * socket receive queues.                          33  * socket receive queues.
 35  */                                                34  */
 36 struct rds_page_frag {                             35 struct rds_page_frag {
 37         struct list_head        f_item;            36         struct list_head        f_item;
 38         struct list_head        f_cache_entry;     37         struct list_head        f_cache_entry;
 39         struct scatterlist      f_sg;              38         struct scatterlist      f_sg;
 40 };                                                 39 };
 41                                                    40 
 42 struct rds_ib_incoming {                           41 struct rds_ib_incoming {
 43         struct list_head        ii_frags;          42         struct list_head        ii_frags;
 44         struct list_head        ii_cache_entry     43         struct list_head        ii_cache_entry;
 45         struct rds_incoming     ii_inc;            44         struct rds_incoming     ii_inc;
 46 };                                                 45 };
 47                                                    46 
 48 struct rds_ib_cache_head {                         47 struct rds_ib_cache_head {
 49         struct list_head *first;                   48         struct list_head *first;
 50         unsigned long count;                       49         unsigned long count;
 51 };                                                 50 };
 52                                                    51 
 53 struct rds_ib_refill_cache {                       52 struct rds_ib_refill_cache {
 54         struct rds_ib_cache_head __percpu *per     53         struct rds_ib_cache_head __percpu *percpu;
 55         struct list_head         *xfer;            54         struct list_head         *xfer;
 56         struct list_head         *ready;           55         struct list_head         *ready;
 57 };                                                 56 };
 58                                                    57 
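
/* Editorial note, not part of the diffed file: a minimal sketch of how the
 * per-CPU refill cache above is meant to be used.  The real producer/consumer
 * code lives in net/rds/ib_recv.c; the helper name below is hypothetical and
 * locking/IRQ handling is omitted for brevity.
 */
static void example_refill_cache_put(struct rds_ib_refill_cache *cache,
                                     struct list_head *new_item)
{
        struct rds_ib_cache_head *chp = per_cpu_ptr(cache->percpu, get_cpu());

        if (!chp->first)
                INIT_LIST_HEAD(new_item);               /* start a new per-CPU chain */
        else
                list_add_tail(new_item, chp->first);    /* append to this CPU's chain */
        chp->first = new_item;
        chp->count++;
        put_cpu();
}
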
 59 /* This is the common structure for the IB pri << 
 60  * an RDS connection.  The exchange is differe << 
 61  * The reason is that the address size is diff << 
 62  * exchanged are in the beginning of the struc << 
 63  * for interoperability if same structure is u << 
 64  */                                            << 
 65 struct rds_ib_conn_priv_cmn {                  << 
 66         u8                      ricpc_protocol << 
 67         u8                      ricpc_protocol << 
 68         __be16                  ricpc_protocol << 
 69         u8                      ricpc_dp_toss; << 
 70         u8                      ripc_reserved1 << 
 71         __be16                  ripc_reserved2 << 
 72         __be64                  ricpc_ack_seq; << 
 73         __be32                  ricpc_credit;  << 
 74 };                                             << 
 75                                                << 
 76 struct rds_ib_connect_private {                    58 struct rds_ib_connect_private {
 77         /* Add new fields at the end, and don'     59         /* Add new fields at the end, and don't permute existing fields. */
 78         __be32                          dp_sad !!  60         __be32                  dp_saddr;
 79         __be32                          dp_dad !!  61         __be32                  dp_daddr;
 80         struct rds_ib_conn_priv_cmn     dp_cmn !!  62         u8                      dp_protocol_major;
 81 };                                             !!  63         u8                      dp_protocol_minor;
 82                                                !!  64         __be16                  dp_protocol_minor_mask; /* bitmask */
 83 struct rds6_ib_connect_private {               !!  65         __be32                  dp_reserved1;
 84         /* Add new fields at the end, and don' !!  66         __be64                  dp_ack_seq;
 85         struct in6_addr                 dp_sad !!  67         __be32                  dp_credit;              /* non-zero enables flow ctl */
 86         struct in6_addr                 dp_dad << 
 87         struct rds_ib_conn_priv_cmn     dp_cmn << 
 88 };                                             << 
 89                                                << 
 90 #define dp_protocol_major       dp_cmn.ricpc_p << 
 91 #define dp_protocol_minor       dp_cmn.ricpc_p << 
 92 #define dp_protocol_minor_mask  dp_cmn.ricpc_p << 
 93 #define dp_ack_seq              dp_cmn.ricpc_a << 
 94 #define dp_credit               dp_cmn.ricpc_c << 
 95                                                << 
 96 union rds_ib_conn_priv {                       << 
 97         struct rds_ib_connect_private   ricp_v << 
 98         struct rds6_ib_connect_private  ricp_v << 
 99 };                                                 68 };
100                                                    69 
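
/* Editorial note, not part of the diffed file: because the dp_* macros above
 * alias into the shared rds_ib_conn_priv_cmn member, the same protocol fields
 * can be read from either half of the union.  The union member names are
 * assumed to be ricp_v4/ricp_v6 (they are truncated in the listing above);
 * the function itself is hypothetical.
 */
static void example_read_proposed_version(const union rds_ib_conn_priv *priv,
                                          bool isv6, u8 *major, u8 *minor)
{
        if (isv6) {
                *major = priv->ricp_v6.dp_protocol_major; /* -> dp_cmn.ricpc_protocol_major */
                *minor = priv->ricp_v6.dp_protocol_minor;
        } else {
                *major = priv->ricp_v4.dp_protocol_major;
                *minor = priv->ricp_v4.dp_protocol_minor;
        }
}
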
101 struct rds_ib_send_work {                          70 struct rds_ib_send_work {
102         void                    *s_op;             71         void                    *s_op;
103         union {                                    72         union {
104                 struct ib_send_wr       s_wr;      73                 struct ib_send_wr       s_wr;
105                 struct ib_rdma_wr       s_rdma     74                 struct ib_rdma_wr       s_rdma_wr;
106                 struct ib_atomic_wr     s_atom     75                 struct ib_atomic_wr     s_atomic_wr;
107         };                                         76         };
108         struct ib_sge           s_sge[RDS_IB_M     77         struct ib_sge           s_sge[RDS_IB_MAX_SGE];
109         unsigned long           s_queued;          78         unsigned long           s_queued;
110 };                                                 79 };
111                                                    80 
112 struct rds_ib_recv_work {                          81 struct rds_ib_recv_work {
113         struct rds_ib_incoming  *r_ibinc;          82         struct rds_ib_incoming  *r_ibinc;
114         struct rds_page_frag    *r_frag;           83         struct rds_page_frag    *r_frag;
115         struct ib_recv_wr       r_wr;              84         struct ib_recv_wr       r_wr;
116         struct ib_sge           r_sge[2];          85         struct ib_sge           r_sge[2];
117 };                                                 86 };
118                                                    87 
119 struct rds_ib_work_ring {                          88 struct rds_ib_work_ring {
120         u32             w_nr;                      89         u32             w_nr;
121         u32             w_alloc_ptr;               90         u32             w_alloc_ptr;
122         u32             w_alloc_ctr;               91         u32             w_alloc_ctr;
123         u32             w_free_ptr;                92         u32             w_free_ptr;
124         atomic_t        w_free_ctr;                93         atomic_t        w_free_ctr;
125 };                                                 94 };
126                                                    95 
127 /* Rings are posted with all the allocations t     96 /* Rings are posted with all the allocations they'll need to queue the
128  * incoming message to the receiving socket so     97  * incoming message to the receiving socket so this can't fail.
129  * All fragments start with a header, so we ca     98  * All fragments start with a header, so we can make sure we're not receiving
130  * garbage, and we can tell a small 8 byte fra     99  * garbage, and we can tell a small 8 byte fragment from an ACK frame.
131  */                                               100  */
132 struct rds_ib_ack_state {                         101 struct rds_ib_ack_state {
133         u64             ack_next;                 102         u64             ack_next;
134         u64             ack_recv;                 103         u64             ack_recv;
135         unsigned int    ack_required:1;           104         unsigned int    ack_required:1;
136         unsigned int    ack_next_valid:1;         105         unsigned int    ack_next_valid:1;
137         unsigned int    ack_recv_valid:1;         106         unsigned int    ack_recv_valid:1;
138 };                                                107 };
139                                                   108 
140                                                   109 
141 struct rds_ib_device;                             110 struct rds_ib_device;
142                                                   111 
143 struct rds_ib_connection {                        112 struct rds_ib_connection {
144                                                   113 
145         struct list_head        ib_node;          114         struct list_head        ib_node;
146         struct rds_ib_device    *rds_ibdev;       115         struct rds_ib_device    *rds_ibdev;
147         struct rds_connection   *conn;            116         struct rds_connection   *conn;
148                                                   117 
149         /* alphabet soup, IBTA style */           118         /* alphabet soup, IBTA style */
150         struct rdma_cm_id       *i_cm_id;         119         struct rdma_cm_id       *i_cm_id;
151         struct ib_pd            *i_pd;            120         struct ib_pd            *i_pd;
152         struct ib_cq            *i_send_cq;       121         struct ib_cq            *i_send_cq;
153         struct ib_cq            *i_recv_cq;       122         struct ib_cq            *i_recv_cq;
154         struct ib_wc            i_send_wc[RDS_    123         struct ib_wc            i_send_wc[RDS_IB_WC_MAX];
155         struct ib_wc            i_recv_wc[RDS_    124         struct ib_wc            i_recv_wc[RDS_IB_WC_MAX];
156                                                   125 
157         /* To control the number of wrs from f    126         /* To control the number of wrs from fastreg */
158         atomic_t                i_fastreg_wrs;    127         atomic_t                i_fastreg_wrs;
159         atomic_t                i_fastreg_inus << 
160                                                   128 
161         /* interrupt handling */                  129         /* interrupt handling */
162         struct tasklet_struct   i_send_tasklet    130         struct tasklet_struct   i_send_tasklet;
163         struct tasklet_struct   i_recv_tasklet    131         struct tasklet_struct   i_recv_tasklet;
164                                                   132 
165         /* tx */                                  133         /* tx */
166         struct rds_ib_work_ring i_send_ring;      134         struct rds_ib_work_ring i_send_ring;
167         struct rm_data_op       *i_data_op;       135         struct rm_data_op       *i_data_op;
168         struct rds_header       **i_send_hdrs; !! 136         struct rds_header       *i_send_hdrs;
169         dma_addr_t              *i_send_hdrs_d !! 137         u64                     i_send_hdrs_dma;
170         struct rds_ib_send_work *i_sends;         138         struct rds_ib_send_work *i_sends;
171         atomic_t                i_signaled_sen    139         atomic_t                i_signaled_sends;
172                                                   140 
173         /* rx */                                  141         /* rx */
174         struct mutex            i_recv_mutex;     142         struct mutex            i_recv_mutex;
175         struct rds_ib_work_ring i_recv_ring;      143         struct rds_ib_work_ring i_recv_ring;
176         struct rds_ib_incoming  *i_ibinc;         144         struct rds_ib_incoming  *i_ibinc;
177         u32                     i_recv_data_re    145         u32                     i_recv_data_rem;
178         struct rds_header       **i_recv_hdrs; !! 146         struct rds_header       *i_recv_hdrs;
179         dma_addr_t              *i_recv_hdrs_d !! 147         u64                     i_recv_hdrs_dma;
180         struct rds_ib_recv_work *i_recvs;         148         struct rds_ib_recv_work *i_recvs;
181         u64                     i_ack_recv;       149         u64                     i_ack_recv;     /* last ACK received */
182         struct rds_ib_refill_cache i_cache_inc    150         struct rds_ib_refill_cache i_cache_incs;
183         struct rds_ib_refill_cache i_cache_fra    151         struct rds_ib_refill_cache i_cache_frags;
184         atomic_t                i_cache_allocs << 
185                                                   152 
186         /* sending acks */                        153         /* sending acks */
187         unsigned long           i_ack_flags;      154         unsigned long           i_ack_flags;
188 #ifdef KERNEL_HAS_ATOMIC64                        155 #ifdef KERNEL_HAS_ATOMIC64
189         atomic64_t              i_ack_next;       156         atomic64_t              i_ack_next;     /* next ACK to send */
190 #else                                             157 #else
191         spinlock_t              i_ack_lock;       158         spinlock_t              i_ack_lock;     /* protect i_ack_next */
192         u64                     i_ack_next;       159         u64                     i_ack_next;     /* next ACK to send */
193 #endif                                            160 #endif
194         struct rds_header       *i_ack;           161         struct rds_header       *i_ack;
195         struct ib_send_wr       i_ack_wr;         162         struct ib_send_wr       i_ack_wr;
196         struct ib_sge           i_ack_sge;        163         struct ib_sge           i_ack_sge;
197         dma_addr_t              i_ack_dma;     !! 164         u64                     i_ack_dma;
198         unsigned long           i_ack_queued;     165         unsigned long           i_ack_queued;
199                                                   166 
200         /* Flow control related information       167         /* Flow control related information
201          *                                        168          *
202          * Our algorithm uses a pair variables    169          * Our algorithm uses a pair variables that we need to access
203          * atomically - one for the send credi    170          * atomically - one for the send credits, and one posted
204          * recv credits we need to transfer to    171          * recv credits we need to transfer to remote.
205          * Rather than protect them using a sl    172          * Rather than protect them using a slow spinlock, we put both into
206          * a single atomic_t and update it usi    173          * a single atomic_t and update it using cmpxchg
207          */                                       174          */
208         atomic_t                i_credits;        175         atomic_t                i_credits;
209                                                   176 
210         /* Protocol version specific informati    177         /* Protocol version specific information */
211         unsigned int            i_flowctl:1;      178         unsigned int            i_flowctl:1;    /* enable/disable flow ctl */
212                                                   179 
213         /* Batched completions */                 180         /* Batched completions */
214         unsigned int            i_unsignaled_w    181         unsigned int            i_unsignaled_wrs;
215                                                << 
216         /* Endpoint role in connection */      << 
217         bool                    i_active_side; << 
218         atomic_t                i_cq_quiesce;  << 
219                                                << 
220         /* Send/Recv vectors */                << 
221         int                     i_scq_vector;  << 
222         int                     i_rcq_vector;  << 
223         u8                      i_sl;          << 
224 };                                                182 };
225                                                   183 
226 /* This assumes that atomic_t is at least 32 b    184 /* This assumes that atomic_t is at least 32 bits */
227 #define IB_GET_SEND_CREDITS(v)  ((v) & 0xffff)    185 #define IB_GET_SEND_CREDITS(v)  ((v) & 0xffff)
228 #define IB_GET_POST_CREDITS(v)  ((v) >> 16)       186 #define IB_GET_POST_CREDITS(v)  ((v) >> 16)
229 #define IB_SET_SEND_CREDITS(v)  ((v) & 0xffff)    187 #define IB_SET_SEND_CREDITS(v)  ((v) & 0xffff)
230 #define IB_SET_POST_CREDITS(v)  ((v) << 16)       188 #define IB_SET_POST_CREDITS(v)  ((v) << 16)
231                                                   189 
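
/* Editorial note, not part of the diffed file: a sketch of the credit encoding
 * described above - send credits in the low 16 bits, posted-recv credits in
 * the high 16 bits of one atomic_t, updated with cmpxchg.  The real logic is
 * rds_ib_send_grab_credits() in ib_send.c; this helper is only illustrative.
 */
static bool example_take_one_send_credit(atomic_t *credits)
{
        unsigned int oldval, newval;

        do {
                oldval = atomic_read(credits);
                if (IB_GET_SEND_CREDITS(oldval) == 0)
                        return false;           /* no send credits available */
                /* keep the posted-recv half, decrement the send half */
                newval = IB_SET_POST_CREDITS(IB_GET_POST_CREDITS(oldval)) |
                         IB_SET_SEND_CREDITS(IB_GET_SEND_CREDITS(oldval) - 1);
        } while (atomic_cmpxchg(credits, oldval, newval) != oldval);

        return true;
}
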
232 struct rds_ib_ipaddr {                            190 struct rds_ib_ipaddr {
233         struct list_head        list;             191         struct list_head        list;
234         __be32                  ipaddr;           192         __be32                  ipaddr;
235         struct rcu_head         rcu;              193         struct rcu_head         rcu;
236 };                                                194 };
237                                                   195 
238 enum {                                            196 enum {
239         RDS_IB_MR_8K_POOL,                        197         RDS_IB_MR_8K_POOL,
240         RDS_IB_MR_1M_POOL,                        198         RDS_IB_MR_1M_POOL,
241 };                                                199 };
242                                                   200 
243 struct rds_ib_device {                            201 struct rds_ib_device {
244         struct list_head        list;             202         struct list_head        list;
245         struct list_head        ipaddr_list;      203         struct list_head        ipaddr_list;
246         struct list_head        conn_list;        204         struct list_head        conn_list;
247         struct ib_device        *dev;             205         struct ib_device        *dev;
248         struct ib_pd            *pd;              206         struct ib_pd            *pd;
249         u8                      odp_capable:1; !! 207         bool                    has_fmr;
                                                   >> 208         bool                    has_fr;
                                                   >> 209         bool                    use_fastreg;
250                                                   210 
251         unsigned int            max_mrs;          211         unsigned int            max_mrs;
252         struct rds_ib_mr_pool   *mr_1m_pool;      212         struct rds_ib_mr_pool   *mr_1m_pool;
253         struct rds_ib_mr_pool   *mr_8k_pool;      213         struct rds_ib_mr_pool   *mr_8k_pool;
                                                   >> 214         unsigned int            fmr_max_remaps;
254         unsigned int            max_8k_mrs;       215         unsigned int            max_8k_mrs;
255         unsigned int            max_1m_mrs;       216         unsigned int            max_1m_mrs;
256         int                     max_sge;          217         int                     max_sge;
257         unsigned int            max_wrs;          218         unsigned int            max_wrs;
258         unsigned int            max_initiator_    219         unsigned int            max_initiator_depth;
259         unsigned int            max_responder_    220         unsigned int            max_responder_resources;
260         spinlock_t              spinlock;         221         spinlock_t              spinlock;       /* protect the above */
261         refcount_t              refcount;      !! 222         atomic_t                refcount;
262         struct work_struct      free_work;        223         struct work_struct      free_work;
263         int                     *vector_load;  << 
264 };                                                224 };
265                                                   225 
                                                   >> 226 #define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
266 #define rdsibdev_to_node(rdsibdev) ibdev_to_no    227 #define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
267                                                   228 
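
/* Editorial note, not part of the diffed file: rdsibdev_to_node() is typically
 * used to place ring allocations on the NUMA node of the HCA.  A minimal
 * sketch, assuming vzalloc_node(); the helper name is hypothetical.
 */
static struct rds_ib_send_work *
example_alloc_send_ring(struct rds_ib_device *rds_ibdev, u32 nr)
{
        return vzalloc_node(nr * sizeof(struct rds_ib_send_work),
                            rdsibdev_to_node(rds_ibdev));
}
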
268 /* bits for i_ack_flags */                        229 /* bits for i_ack_flags */
269 #define IB_ACK_IN_FLIGHT        0                 230 #define IB_ACK_IN_FLIGHT        0
270 #define IB_ACK_REQUESTED        1                 231 #define IB_ACK_REQUESTED        1
271                                                   232 
272 /* Magic WR_ID for ACKs */                        233 /* Magic WR_ID for ACKs */
273 #define RDS_IB_ACK_WR_ID        (~(u64) 0)        234 #define RDS_IB_ACK_WR_ID        (~(u64) 0)
274                                                   235 
275 struct rds_ib_statistics {                        236 struct rds_ib_statistics {
276         uint64_t        s_ib_connect_raced;       237         uint64_t        s_ib_connect_raced;
277         uint64_t        s_ib_listen_closed_sta    238         uint64_t        s_ib_listen_closed_stale;
278         uint64_t        s_ib_evt_handler_call;    239         uint64_t        s_ib_evt_handler_call;
279         uint64_t        s_ib_tasklet_call;        240         uint64_t        s_ib_tasklet_call;
280         uint64_t        s_ib_tx_cq_event;         241         uint64_t        s_ib_tx_cq_event;
281         uint64_t        s_ib_tx_ring_full;        242         uint64_t        s_ib_tx_ring_full;
282         uint64_t        s_ib_tx_throttle;         243         uint64_t        s_ib_tx_throttle;
283         uint64_t        s_ib_tx_sg_mapping_fai    244         uint64_t        s_ib_tx_sg_mapping_failure;
284         uint64_t        s_ib_tx_stalled;          245         uint64_t        s_ib_tx_stalled;
285         uint64_t        s_ib_tx_credit_updates    246         uint64_t        s_ib_tx_credit_updates;
286         uint64_t        s_ib_rx_cq_event;         247         uint64_t        s_ib_rx_cq_event;
287         uint64_t        s_ib_rx_ring_empty;       248         uint64_t        s_ib_rx_ring_empty;
288         uint64_t        s_ib_rx_refill_from_cq    249         uint64_t        s_ib_rx_refill_from_cq;
289         uint64_t        s_ib_rx_refill_from_th    250         uint64_t        s_ib_rx_refill_from_thread;
290         uint64_t        s_ib_rx_alloc_limit;      251         uint64_t        s_ib_rx_alloc_limit;
291         uint64_t        s_ib_rx_total_frags;   << 
292         uint64_t        s_ib_rx_total_incs;    << 
293         uint64_t        s_ib_rx_credit_updates    252         uint64_t        s_ib_rx_credit_updates;
294         uint64_t        s_ib_ack_sent;            253         uint64_t        s_ib_ack_sent;
295         uint64_t        s_ib_ack_send_failure;    254         uint64_t        s_ib_ack_send_failure;
296         uint64_t        s_ib_ack_send_delayed;    255         uint64_t        s_ib_ack_send_delayed;
297         uint64_t        s_ib_ack_send_piggybac    256         uint64_t        s_ib_ack_send_piggybacked;
298         uint64_t        s_ib_ack_received;        257         uint64_t        s_ib_ack_received;
299         uint64_t        s_ib_rdma_mr_8k_alloc;    258         uint64_t        s_ib_rdma_mr_8k_alloc;
300         uint64_t        s_ib_rdma_mr_8k_free;     259         uint64_t        s_ib_rdma_mr_8k_free;
301         uint64_t        s_ib_rdma_mr_8k_used;     260         uint64_t        s_ib_rdma_mr_8k_used;
302         uint64_t        s_ib_rdma_mr_8k_pool_f    261         uint64_t        s_ib_rdma_mr_8k_pool_flush;
303         uint64_t        s_ib_rdma_mr_8k_pool_w    262         uint64_t        s_ib_rdma_mr_8k_pool_wait;
304         uint64_t        s_ib_rdma_mr_8k_pool_d    263         uint64_t        s_ib_rdma_mr_8k_pool_depleted;
305         uint64_t        s_ib_rdma_mr_1m_alloc;    264         uint64_t        s_ib_rdma_mr_1m_alloc;
306         uint64_t        s_ib_rdma_mr_1m_free;     265         uint64_t        s_ib_rdma_mr_1m_free;
307         uint64_t        s_ib_rdma_mr_1m_used;     266         uint64_t        s_ib_rdma_mr_1m_used;
308         uint64_t        s_ib_rdma_mr_1m_pool_f    267         uint64_t        s_ib_rdma_mr_1m_pool_flush;
309         uint64_t        s_ib_rdma_mr_1m_pool_w    268         uint64_t        s_ib_rdma_mr_1m_pool_wait;
310         uint64_t        s_ib_rdma_mr_1m_pool_d    269         uint64_t        s_ib_rdma_mr_1m_pool_depleted;
311         uint64_t        s_ib_rdma_mr_8k_reused    270         uint64_t        s_ib_rdma_mr_8k_reused;
312         uint64_t        s_ib_rdma_mr_1m_reused    271         uint64_t        s_ib_rdma_mr_1m_reused;
313         uint64_t        s_ib_atomic_cswp;         272         uint64_t        s_ib_atomic_cswp;
314         uint64_t        s_ib_atomic_fadd;         273         uint64_t        s_ib_atomic_fadd;
315         uint64_t        s_ib_recv_added_to_cac << 
316         uint64_t        s_ib_recv_removed_from << 
317 };                                                274 };
318                                                   275 
319 extern struct workqueue_struct *rds_ib_wq;        276 extern struct workqueue_struct *rds_ib_wq;
320                                                   277 
321 /*                                                278 /*
322  * Fake ib_dma_sync_sg_for_{cpu,device} as lon    279  * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
323  * doesn't define it.                             280  * doesn't define it.
324  */                                               281  */
325 static inline void rds_ib_dma_sync_sg_for_cpu(    282 static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
326                                                   283                                               struct scatterlist *sglist,
327                                                   284                                               unsigned int sg_dma_len,
328                                                   285                                               int direction)
329 {                                                 286 {
330         struct scatterlist *sg;                   287         struct scatterlist *sg;
331         unsigned int i;                           288         unsigned int i;
332                                                   289 
333         for_each_sg(sglist, sg, sg_dma_len, i)    290         for_each_sg(sglist, sg, sg_dma_len, i) {
334                 ib_dma_sync_single_for_cpu(dev !! 291                 ib_dma_sync_single_for_cpu(dev,
335                                            sg_ !! 292                                 ib_sg_dma_address(dev, sg),
                                                   >> 293                                 ib_sg_dma_len(dev, sg),
                                                   >> 294                                 direction);
336         }                                         295         }
337 }                                                 296 }
338 #define ib_dma_sync_sg_for_cpu  rds_ib_dma_syn    297 #define ib_dma_sync_sg_for_cpu  rds_ib_dma_sync_sg_for_cpu
339                                                   298 
340 static inline void rds_ib_dma_sync_sg_for_devi    299 static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
341                                                   300                                                  struct scatterlist *sglist,
342                                                   301                                                  unsigned int sg_dma_len,
343                                                   302                                                  int direction)
344 {                                                 303 {
345         struct scatterlist *sg;                   304         struct scatterlist *sg;
346         unsigned int i;                           305         unsigned int i;
347                                                   306 
348         for_each_sg(sglist, sg, sg_dma_len, i)    307         for_each_sg(sglist, sg, sg_dma_len, i) {
349                 ib_dma_sync_single_for_device( !! 308                 ib_dma_sync_single_for_device(dev,
350                                                !! 309                                 ib_sg_dma_address(dev, sg),
                                                   >> 310                                 ib_sg_dma_len(dev, sg),
                                                   >> 311                                 direction);
351         }                                         312         }
352 }                                                 313 }
353 #define ib_dma_sync_sg_for_device       rds_ib    314 #define ib_dma_sync_sg_for_device       rds_ib_dma_sync_sg_for_device
354                                                   315 
355                                                   316 
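
/* Editorial note, not part of the diffed file: a sketch of how a receive path
 * might bracket CPU access to DMA-mapped fragments with the sync wrappers
 * defined above.  Arguments and the function name are hypothetical.
 */
static void example_copy_frag_out(struct ib_device *dev, struct scatterlist *sg,
                                  unsigned int sg_dma_len, void *dst, size_t len)
{
        /* give the buffers back to the CPU before reading them */
        ib_dma_sync_sg_for_cpu(dev, sg, sg_dma_len, DMA_FROM_DEVICE);

        sg_copy_to_buffer(sg, sg_dma_len, dst, len);

        /* hand the buffers back to the device before reposting the recv WR */
        ib_dma_sync_sg_for_device(dev, sg, sg_dma_len, DMA_FROM_DEVICE);
}
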
356 /* ib.c */                                        317 /* ib.c */
357 extern struct rds_transport rds_ib_transport;     318 extern struct rds_transport rds_ib_transport;
358 struct rds_ib_device *rds_ib_get_client_data(s    319 struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
359 void rds_ib_dev_put(struct rds_ib_device *rds_    320 void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
360 extern struct ib_client rds_ib_client;            321 extern struct ib_client rds_ib_client;
361                                                   322 
362 extern unsigned int rds_ib_retry_count;           323 extern unsigned int rds_ib_retry_count;
363                                                   324 
364 extern spinlock_t ib_nodev_conns_lock;            325 extern spinlock_t ib_nodev_conns_lock;
365 extern struct list_head ib_nodev_conns;           326 extern struct list_head ib_nodev_conns;
366                                                   327 
367 /* ib_cm.c */                                     328 /* ib_cm.c */
368 int rds_ib_conn_alloc(struct rds_connection *c    329 int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
369 void rds_ib_conn_free(void *arg);                 330 void rds_ib_conn_free(void *arg);
370 int rds_ib_conn_path_connect(struct rds_conn_p    331 int rds_ib_conn_path_connect(struct rds_conn_path *cp);
371 void rds_ib_conn_path_shutdown(struct rds_conn    332 void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
                                                   >> 333 void rds_ib_state_change(struct sock *sk);
                                                   >> 334 int rds_ib_listen_init(void);
                                                   >> 335 void rds_ib_listen_stop(void);
372 __printf(2, 3)                                    336 __printf(2, 3)
373 void __rds_ib_conn_error(struct rds_connection    337 void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
374 int rds_ib_cm_handle_connect(struct rdma_cm_id    338 int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
375                              struct rdma_cm_ev !! 339                              struct rdma_cm_event *event);
376 int rds_ib_cm_initiate_connect(struct rdma_cm_ !! 340 int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
377 void rds_ib_cm_connect_complete(struct rds_con    341 void rds_ib_cm_connect_complete(struct rds_connection *conn,
378                                 struct rdma_cm    342                                 struct rdma_cm_event *event);
379                                                   343 
                                                   >> 344 
380 #define rds_ib_conn_error(conn, fmt...) \         345 #define rds_ib_conn_error(conn, fmt...) \
381         __rds_ib_conn_error(conn, KERN_WARNING    346         __rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
382                                                   347 
383 /* ib_rdma.c */                                   348 /* ib_rdma.c */
384 int rds_ib_update_ipaddr(struct rds_ib_device  !! 349 int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
385                          struct in6_addr *ipad << 
386 void rds_ib_add_conn(struct rds_ib_device *rds    350 void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
387 void rds_ib_remove_conn(struct rds_ib_device *    351 void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
388 void rds_ib_destroy_nodev_conns(void);            352 void rds_ib_destroy_nodev_conns(void);
389 void rds_ib_mr_cqe_handler(struct rds_ib_conne    353 void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
390                                                   354 
391 /* ib_recv.c */                                   355 /* ib_recv.c */
392 int rds_ib_recv_init(void);                       356 int rds_ib_recv_init(void);
393 void rds_ib_recv_exit(void);                      357 void rds_ib_recv_exit(void);
394 int rds_ib_recv_path(struct rds_conn_path *con    358 int rds_ib_recv_path(struct rds_conn_path *conn);
395 int rds_ib_recv_alloc_caches(struct rds_ib_con !! 359 int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
396 void rds_ib_recv_free_caches(struct rds_ib_con    360 void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
397 void rds_ib_recv_refill(struct rds_connection     361 void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
398 void rds_ib_inc_free(struct rds_incoming *inc)    362 void rds_ib_inc_free(struct rds_incoming *inc);
399 int rds_ib_inc_copy_to_user(struct rds_incomin    363 int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
400 void rds_ib_recv_cqe_handler(struct rds_ib_con    364 void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
401                              struct rds_ib_ack    365                              struct rds_ib_ack_state *state);
                                                   >> 366 void rds_ib_recv_tasklet_fn(unsigned long data);
402 void rds_ib_recv_init_ring(struct rds_ib_conne    367 void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
403 void rds_ib_recv_clear_ring(struct rds_ib_conn    368 void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
404 void rds_ib_recv_init_ack(struct rds_ib_connec    369 void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
405 void rds_ib_attempt_ack(struct rds_ib_connecti    370 void rds_ib_attempt_ack(struct rds_ib_connection *ic);
406 void rds_ib_ack_send_complete(struct rds_ib_co    371 void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
407 u64 rds_ib_piggyb_ack(struct rds_ib_connection    372 u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
408 void rds_ib_set_ack(struct rds_ib_connection *    373 void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);
409                                                   374 
410 /* ib_ring.c */                                   375 /* ib_ring.c */
411 void rds_ib_ring_init(struct rds_ib_work_ring     376 void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
412 void rds_ib_ring_resize(struct rds_ib_work_rin    377 void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
413 u32 rds_ib_ring_alloc(struct rds_ib_work_ring     378 u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
414 void rds_ib_ring_free(struct rds_ib_work_ring     379 void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
415 void rds_ib_ring_unalloc(struct rds_ib_work_ri    380 void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
416 int rds_ib_ring_empty(struct rds_ib_work_ring     381 int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
417 int rds_ib_ring_low(struct rds_ib_work_ring *r    382 int rds_ib_ring_low(struct rds_ib_work_ring *ring);
418 u32 rds_ib_ring_oldest(struct rds_ib_work_ring    383 u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
419 u32 rds_ib_ring_completed(struct rds_ib_work_r    384 u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
420 extern wait_queue_head_t rds_ib_ring_empty_wai    385 extern wait_queue_head_t rds_ib_ring_empty_wait;
421                                                   386 
422 /* ib_send.c */                                   387 /* ib_send.c */
423 void rds_ib_xmit_path_complete(struct rds_conn    388 void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
424 int rds_ib_xmit(struct rds_connection *conn, s    389 int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
425                 unsigned int hdr_off, unsigned    390                 unsigned int hdr_off, unsigned int sg, unsigned int off);
426 void rds_ib_send_cqe_handler(struct rds_ib_con    391 void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
427 void rds_ib_send_init_ring(struct rds_ib_conne    392 void rds_ib_send_init_ring(struct rds_ib_connection *ic);
428 void rds_ib_send_clear_ring(struct rds_ib_conn    393 void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
429 int rds_ib_xmit_rdma(struct rds_connection *co    394 int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
430 void rds_ib_send_add_credits(struct rds_connec    395 void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
431 void rds_ib_advertise_credits(struct rds_conne    396 void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
432 int rds_ib_send_grab_credits(struct rds_ib_con    397 int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
433                              u32 *adv_credits,    398                              u32 *adv_credits, int need_posted, int max_posted);
434 int rds_ib_xmit_atomic(struct rds_connection *    399 int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
435                                                   400 
436 /* ib_stats.c */                                  401 /* ib_stats.c */
437 DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_s !! 402 DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
438 #define rds_ib_stats_inc(member) rds_stats_inc    403 #define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
439 #define rds_ib_stats_add(member, count) \      << 
440                 rds_stats_add_which(rds_ib_sta << 
441 unsigned int rds_ib_stats_info_copy(struct rds    404 unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
442                                     unsigned i    405                                     unsigned int avail);
443                                                   406 
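
/* Editorial note, not part of the diffed file: the per-CPU statistics are
 * bumped through the two macros above; a hypothetical accounting helper
 * might look like this (member names are from rds_ib_statistics).
 */
static void example_account_refill(unsigned int nr_frags)
{
        rds_ib_stats_inc(s_ib_rx_refill_from_thread);
        rds_ib_stats_add(s_ib_rx_total_frags, nr_frags);
}
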
444 /* ib_sysctl.c */                                 407 /* ib_sysctl.c */
445 int rds_ib_sysctl_init(void);                     408 int rds_ib_sysctl_init(void);
446 void rds_ib_sysctl_exit(void);                    409 void rds_ib_sysctl_exit(void);
447 extern unsigned long rds_ib_sysctl_max_send_wr    410 extern unsigned long rds_ib_sysctl_max_send_wr;
448 extern unsigned long rds_ib_sysctl_max_recv_wr    411 extern unsigned long rds_ib_sysctl_max_recv_wr;
449 extern unsigned long rds_ib_sysctl_max_unsig_w    412 extern unsigned long rds_ib_sysctl_max_unsig_wrs;
450 extern unsigned long rds_ib_sysctl_max_unsig_b    413 extern unsigned long rds_ib_sysctl_max_unsig_bytes;
451 extern unsigned long rds_ib_sysctl_max_recv_al    414 extern unsigned long rds_ib_sysctl_max_recv_allocation;
452 extern unsigned int rds_ib_sysctl_flow_control    415 extern unsigned int rds_ib_sysctl_flow_control;
453                                                   416 
454 #endif                                            417 #endif
455                                                   418 
