/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Macros for SMC statistics
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s):  Guvenc Gulce
 */

#ifndef NET_SMC_SMC_STATS_H_
#define NET_SMC_SMC_STATS_H_
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/smc.h>

#include "smc_clc.h"

#define SMC_MAX_FBACK_RSN_CNT 36

enum {
	SMC_BUF_8K,
	SMC_BUF_16K,
	SMC_BUF_32K,
	SMC_BUF_64K,
	SMC_BUF_128K,
	SMC_BUF_256K,
	SMC_BUF_512K,
	SMC_BUF_1024K,
	SMC_BUF_G_1024K,
	SMC_BUF_MAX,
};

struct smc_stats_fback {
	int	fback_code;
	u16	count;
};

struct smc_stats_rsn {
	struct smc_stats_fback srv[SMC_MAX_FBACK_RSN_CNT];
	struct smc_stats_fback clnt[SMC_MAX_FBACK_RSN_CNT];
	u64			srv_fback_cnt;
	u64			clnt_fback_cnt;
};

struct smc_stats_rmbcnt {
	u64	buf_size_small_peer_cnt;
	u64	buf_size_small_cnt;
	u64	buf_full_peer_cnt;
	u64	buf_full_cnt;
	u64	reuse_cnt;
	u64	alloc_cnt;
	u64	dgrade_cnt;
};

struct smc_stats_memsize {
	u64	buf[SMC_BUF_MAX];
};

struct smc_stats_tech {
	struct smc_stats_memsize tx_rmbsize;
	struct smc_stats_memsize rx_rmbsize;
	struct smc_stats_memsize tx_pd;
	struct smc_stats_memsize rx_pd;
	struct smc_stats_rmbcnt rmb_tx;
	struct smc_stats_rmbcnt rmb_rx;
	u64			clnt_v1_succ_cnt;
	u64			clnt_v2_succ_cnt;
	u64			srv_v1_succ_cnt;
	u64			srv_v2_succ_cnt;
	u64			urg_data_cnt;
	u64			splice_cnt;
	u64			cork_cnt;
	u64			ndly_cnt;
	u64			rx_bytes;
	u64			tx_bytes;
	u64			rx_cnt;
	u64			tx_cnt;
	u64			rx_rmbuse;
	u64			tx_rmbuse;
};

struct smc_stats {
	struct smc_stats_tech	smc[2];
	u64			clnt_hshake_err_cnt;
	u64			srv_hshake_err_cnt;
};

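/*
 * The payload macros below bin each transfer into the SMC_BUF_* histogram
 * buckets declared above:
 *
 *	bucket = min(fls64((len - 1) >> 13), SMC_BUF_MAX - 1)
 *
 * i.e. 8K granularity that doubles per bucket; a 20000-byte transfer
 * ((20000 - 1) >> 13 == 2, fls64(2) == 2) is counted in SMC_BUF_32K, and
 * anything above 1 MB falls into SMC_BUF_G_1024K.
 *
 * Illustrative use only (the argument names are placeholders, not taken
 * from the actual call sites): the send path would invoke
 * SMC_STAT_TX_PAYLOAD(smc, copied, rc) with the number of bytes handed off
 * and the return code, the receive path SMC_STAT_RX_PAYLOAD(smc, copied, rc)
 * accordingly.
 */
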
#define SMC_STAT_PAYLOAD_SUB(_smc_stats, _tech, key, _len, _rc) \
do { \
	typeof(_smc_stats) stats = (_smc_stats); \
	typeof(_tech) t = (_tech); \
	typeof(_len) l = (_len); \
	int _pos; \
	typeof(_rc) r = (_rc); \
	int m = SMC_BUF_MAX - 1; \
	this_cpu_inc((*stats).smc[t].key ## _cnt); \
	if (r <= 0 || l <= 0) \
		break; \
	_pos = fls64((l - 1) >> 13); \
	_pos = (_pos <= m) ? _pos : m; \
	this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
	this_cpu_add((*stats).smc[t].key ## _bytes, r); \
} \
while (0)

#define SMC_STAT_TX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	struct net *_net = sock_net(&__smc->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, tx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, tx, _len, _rc); \
} \
while (0)

#define SMC_STAT_RX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	struct net *_net = sock_net(&__smc->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, rx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, rx, _len, _rc); \
} \
while (0)

#define SMC_STAT_RMB_SIZE_SUB(_smc_stats, _tech, k, _is_add, _len) \
do { \
	typeof(_smc_stats) stats = (_smc_stats); \
	typeof(_is_add) is_a = (_is_add); \
	typeof(_len) _l = (_len); \
	typeof(_tech) t = (_tech); \
	int _pos; \
	int m = SMC_BUF_MAX - 1; \
	if (_l <= 0) \
		break; \
	if (is_a) { \
		_pos = fls((_l - 1) >> 13); \
		_pos = (_pos <= m) ? _pos : m; \
		this_cpu_inc((*stats).smc[t].k ## _rmbsize.buf[_pos]); \
		this_cpu_add((*stats).smc[t].k ## _rmbuse, _l); \
	} else { \
		this_cpu_sub((*stats).smc[t].k ## _rmbuse, _l); \
	} \
} \
while (0)

#define SMC_STAT_RMB_SUB(_smc_stats, type, t, key) \
	this_cpu_inc((*(_smc_stats)).smc[t].rmb ## key.type ## _cnt)

#define SMC_STAT_RMB_SIZE(_smc, _is_smcd, _is_rx, _is_add, _len) \
do { \
	struct net *_net = sock_net(&(_smc)->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(_is_add) is_add = (_is_add); \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	typeof(_len) l = (_len); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, rx, is_add, l); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, tx, is_add, l); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, rx, is_add, l); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, tx, is_add, l); \
} \
while (0)

#define SMC_STAT_RMB(_smc, type, _is_smcd, _is_rx) \
do { \
	struct net *net = sock_net(&(_smc)->sk); \
	struct smc_stats __percpu *_smc_stats = net->smc.smc_stats; \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, _rx); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, _tx); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, _rx); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, _tx); \
} \
while (0)

#define SMC_STAT_BUF_REUSE(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, reuse, is_smcd, is_rx)

#define SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, alloc, is_smcd, is_rx)

#define SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, dgrade, is_smcd, is_rx)

#define SMC_STAT_RMB_TX_PEER_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full, is_smcd, false)

#define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small, is_smcd, false)

#define SMC_STAT_RMB_RX_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small, is_smcd, true)

#define SMC_STAT_RMB_RX_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full, is_smcd, true)

#define SMC_STAT_INC(_smc, type) \
do { \
	typeof(_smc) __smc = _smc; \
	bool is_smcd = !(__smc)->conn.lnk; \
	struct net *net = sock_net(&(__smc)->sk); \
	struct smc_stats __percpu *smc_stats = net->smc.smc_stats; \
	if ((is_smcd)) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].type); \
	else \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].type); \
} \
while (0)

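/*
 * Handshake-success counters: one counter per combination of role
 * (client/server), protocol version (SMCv1/SMCv2) and technology
 * (SMC-D/SMC-R).  The client side classifies the connection from the
 * received CLC accept message, the server side from the negotiated
 * smc_init_info.
 */
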
#define SMC_STAT_CLNT_SUCC_INC(net, _aclc) \
do { \
	typeof(_aclc) acl = (_aclc); \
	bool is_v2 = (acl->hdr.version == SMC_V2); \
	bool is_smcd = (acl->hdr.typev1 == SMC_TYPE_D); \
	struct smc_stats __percpu *smc_stats = (net)->smc.smc_stats; \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v1_succ_cnt); \
} \
while (0)

#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
do { \
	typeof(_ini) i = (_ini); \
	bool is_smcd = (i->is_smcd); \
	u8 version = is_smcd ? i->smcd_version : i->smcr_version; \
	bool is_v2 = (version & SMC_V2); \
	typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v1_succ_cnt); \
} \
while (0)

int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb);
int smc_stats_init(struct net *net);
void smc_stats_exit(struct net *net);

#endif /* NET_SMC_SMC_STATS_H_ */