TOMOYO Linux Cross Reference
Linux/net/smc/smc_loopback.c


// SPDX-License-Identifier: GPL-2.0
/*
 *  Shared Memory Communications Direct over loopback-ism device.
 *
 *  Functions for loopback-ism device.
 *
 *  Copyright (c) 2024, Alibaba Inc.
 *
 *  Author: Wen Gu <guwen@linux.alibaba.com>
 *          Tony Lu <tonylu@linux.alibaba.com>
 *
 */

#include <linux/device.h>
#include <linux/types.h>
#include <net/smc.h>

#include "smc_cdc.h"
#include "smc_ism.h"
#include "smc_loopback.h"

#define SMC_LO_V2_CAPABLE       0x1 /* loopback-ism acts as ISMv2 */
#define SMC_LO_SUPPORT_NOCOPY   0x1
#define SMC_DMA_ADDR_INVALID    (~(dma_addr_t)0)

static const char smc_lo_dev_name[] = "loopback-ism";
static struct smc_lo_dev *lo_dev;

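/* Generate the identifiers of the loopback-ism device: a random UUID is
 * split into the 64-bit GID and the 64-bit extended GID, and the CHID is
 * set to the value reserved for loopback-ism.
 */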
static void smc_lo_generate_ids(struct smc_lo_dev *ldev)
{
        struct smcd_gid *lgid = &ldev->local_gid;
        uuid_t uuid;

        uuid_gen(&uuid);
        memcpy(&lgid->gid, &uuid, sizeof(lgid->gid));
        memcpy(&lgid->gid_ext, (u8 *)&uuid + sizeof(lgid->gid),
               sizeof(lgid->gid_ext));

        ldev->chid = SMC_LO_RESERVED_CHID;
}

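/* .query_remote_gid op: the loopback-ism device can only reach itself,
 * so the queried remote GID must match the device's own local GID.
 */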
static int smc_lo_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
                             u32 vid_valid, u32 vid)
{
        struct smc_lo_dev *ldev = smcd->priv;

        /* rgid should be the same as lgid */
        if (!ldev || rgid->gid != ldev->local_gid.gid ||
            rgid->gid_ext != ldev->local_gid.gid_ext)
                return -ENETUNREACH;
        return 0;
}

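/* .register_dmb op: reserve a free slot in the index mask, allocate the
 * DMB buffer, pick a random token that does not collide with an existing
 * one and insert the new node into the token hash table.
 */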
static int smc_lo_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
                               void *client_priv)
{
        struct smc_lo_dmb_node *dmb_node, *tmp_node;
        struct smc_lo_dev *ldev = smcd->priv;
        int sba_idx, rc;

        /* check space for new dmb */
        for_each_clear_bit(sba_idx, ldev->sba_idx_mask, SMC_LO_MAX_DMBS) {
                if (!test_and_set_bit(sba_idx, ldev->sba_idx_mask))
                        break;
        }
        if (sba_idx == SMC_LO_MAX_DMBS)
                return -ENOSPC;

        dmb_node = kzalloc(sizeof(*dmb_node), GFP_KERNEL);
        if (!dmb_node) {
                rc = -ENOMEM;
                goto err_bit;
        }

        dmb_node->sba_idx = sba_idx;
        dmb_node->len = dmb->dmb_len;
        dmb_node->cpu_addr = kzalloc(dmb_node->len, GFP_KERNEL |
                                     __GFP_NOWARN | __GFP_NORETRY |
                                     __GFP_NOMEMALLOC);
        if (!dmb_node->cpu_addr) {
                rc = -ENOMEM;
                goto err_node;
        }
        dmb_node->dma_addr = SMC_DMA_ADDR_INVALID;
        refcount_set(&dmb_node->refcnt, 1);

again:
        /* add new dmb into hash table */
        get_random_bytes(&dmb_node->token, sizeof(dmb_node->token));
        write_lock_bh(&ldev->dmb_ht_lock);
        hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_node->token) {
                if (tmp_node->token == dmb_node->token) {
                        write_unlock_bh(&ldev->dmb_ht_lock);
                        goto again;
                }
        }
        hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
        write_unlock_bh(&ldev->dmb_ht_lock);
        atomic_inc(&ldev->dmb_cnt);

        dmb->sba_idx = dmb_node->sba_idx;
        dmb->dmb_tok = dmb_node->token;
        dmb->cpu_addr = dmb_node->cpu_addr;
        dmb->dma_addr = dmb_node->dma_addr;
        dmb->dmb_len = dmb_node->len;

        return 0;

err_node:
        kfree(dmb_node);
err_bit:
        clear_bit(sba_idx, ldev->sba_idx_mask);
        return rc;
}

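/* Free a DMB node: unlink it from the hash table, release its slot and
 * buffer, and wake up a waiting device teardown once the last DMB is gone.
 */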
static void __smc_lo_unregister_dmb(struct smc_lo_dev *ldev,
                                    struct smc_lo_dmb_node *dmb_node)
{
        /* remove dmb from hash table */
        write_lock_bh(&ldev->dmb_ht_lock);
        hash_del(&dmb_node->list);
        write_unlock_bh(&ldev->dmb_ht_lock);

        clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
        kvfree(dmb_node->cpu_addr);
        kfree(dmb_node);

        if (atomic_dec_and_test(&ldev->dmb_cnt))
                wake_up(&ldev->ldev_release);
}

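/* .unregister_dmb op: look up the DMB by token and drop the owner's
 * reference; the node is only freed once no peer is attached to it.
 */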
static int smc_lo_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
        struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
        struct smc_lo_dev *ldev = smcd->priv;

        /* find dmb from hash table */
        read_lock_bh(&ldev->dmb_ht_lock);
        hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
                if (tmp_node->token == dmb->dmb_tok) {
                        dmb_node = tmp_node;
                        break;
                }
        }
        if (!dmb_node) {
                read_unlock_bh(&ldev->dmb_ht_lock);
                return -EINVAL;
        }
        read_unlock_bh(&ldev->dmb_ht_lock);

        if (refcount_dec_and_test(&dmb_node->refcnt))
                __smc_lo_unregister_dmb(ldev, dmb_node);
        return 0;
}

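/* .support_dmb_nocopy op: advertise that a peer may attach to a DMB
 * directly instead of copying through a separate send buffer.
 */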
static int smc_lo_support_dmb_nocopy(struct smcd_dev *smcd)
{
        return SMC_LO_SUPPORT_NOCOPY;
}

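/* .attach_dmb op: look up the peer's DMB by token and take a reference
 * so its buffer can be used directly as the local send buffer; fails if
 * the DMB is concurrently being unregistered.
 */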
static int smc_lo_attach_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
        struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
        struct smc_lo_dev *ldev = smcd->priv;

        /* find dmb_node according to dmb->dmb_tok */
        read_lock_bh(&ldev->dmb_ht_lock);
        hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
                if (tmp_node->token == dmb->dmb_tok) {
                        dmb_node = tmp_node;
                        break;
                }
        }
        if (!dmb_node) {
                read_unlock_bh(&ldev->dmb_ht_lock);
                return -EINVAL;
        }
        read_unlock_bh(&ldev->dmb_ht_lock);

        if (!refcount_inc_not_zero(&dmb_node->refcnt))
                /* the dmb is being unregistered, but has
                 * not been removed from the hash table.
                 */
                return -EINVAL;

        /* provide dmb information */
        dmb->sba_idx = dmb_node->sba_idx;
        dmb->dmb_tok = dmb_node->token;
        dmb->cpu_addr = dmb_node->cpu_addr;
        dmb->dma_addr = dmb_node->dma_addr;
        dmb->dmb_len = dmb_node->len;
        return 0;
}

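/* .detach_dmb op: drop the reference taken by smc_lo_attach_dmb(),
 * freeing the DMB if its owner has already unregistered it.
 */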
static int smc_lo_detach_dmb(struct smcd_dev *smcd, u64 token)
{
        struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
        struct smc_lo_dev *ldev = smcd->priv;

        /* find dmb_node according to dmb->dmb_tok */
        read_lock_bh(&ldev->dmb_ht_lock);
        hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
                if (tmp_node->token == token) {
                        dmb_node = tmp_node;
                        break;
                }
        }
        if (!dmb_node) {
                read_unlock_bh(&ldev->dmb_ht_lock);
                return -EINVAL;
        }
        read_unlock_bh(&ldev->dmb_ht_lock);

        if (refcount_dec_and_test(&dmb_node->refcnt))
                __smc_lo_unregister_dmb(ldev, dmb_node);
        return 0;
}

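/* .move_data op: with nocopy the send buffer is the peer DMB itself, so
 * plain writes need no copy; signalled writes (sf set) are copied into
 * the target DMB and the owning connection's rx tasklet is scheduled.
 */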
static int smc_lo_move_data(struct smcd_dev *smcd, u64 dmb_tok,
                            unsigned int idx, bool sf, unsigned int offset,
                            void *data, unsigned int size)
{
        struct smc_lo_dmb_node *rmb_node = NULL, *tmp_node;
        struct smc_lo_dev *ldev = smcd->priv;
        struct smc_connection *conn;

        if (!sf)
                /* since sndbuf is merged with peer DMB, there is
                 * no need to copy data from sndbuf to peer DMB.
                 */
                return 0;

        read_lock_bh(&ldev->dmb_ht_lock);
        hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
                if (tmp_node->token == dmb_tok) {
                        rmb_node = tmp_node;
                        break;
                }
        }
        if (!rmb_node) {
                read_unlock_bh(&ldev->dmb_ht_lock);
                return -EINVAL;
        }
        memcpy((char *)rmb_node->cpu_addr + offset, data, size);
        read_unlock_bh(&ldev->dmb_ht_lock);

        conn = smcd->conn[rmb_node->sba_idx];
        if (!conn || conn->killed)
                return -EPIPE;
        tasklet_schedule(&conn->rx_tsklet);
        return 0;
}

static int smc_lo_supports_v2(void)
{
        return SMC_LO_V2_CAPABLE;
}

static void smc_lo_get_local_gid(struct smcd_dev *smcd,
                                 struct smcd_gid *smcd_gid)
{
        struct smc_lo_dev *ldev = smcd->priv;

        smcd_gid->gid = ldev->local_gid.gid;
        smcd_gid->gid_ext = ldev->local_gid.gid_ext;
}

static u16 smc_lo_get_chid(struct smcd_dev *smcd)
{
        return ((struct smc_lo_dev *)smcd->priv)->chid;
}

static struct device *smc_lo_get_dev(struct smcd_dev *smcd)
{
        return &((struct smc_lo_dev *)smcd->priv)->dev;
}

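/* smcd_ops of the loopback-ism device; VLAN handling and event signalling
 * are not needed for loopback and are left unimplemented.
 */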
static const struct smcd_ops lo_ops = {
        .query_remote_gid = smc_lo_query_rgid,
        .register_dmb = smc_lo_register_dmb,
        .unregister_dmb = smc_lo_unregister_dmb,
        .support_dmb_nocopy = smc_lo_support_dmb_nocopy,
        .attach_dmb = smc_lo_attach_dmb,
        .detach_dmb = smc_lo_detach_dmb,
        .add_vlan_id            = NULL,
        .del_vlan_id            = NULL,
        .set_vlan_required      = NULL,
        .reset_vlan_required    = NULL,
        .signal_event           = NULL,
        .move_data = smc_lo_move_data,
        .supports_v2 = smc_lo_supports_v2,
        .get_local_gid = smc_lo_get_local_gid,
        .get_chid = smc_lo_get_chid,
        .get_dev = smc_lo_get_dev,
};

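/* Allocate and initialize the smcd_dev, including the connection array
 * indexed by DMB slot.
 */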
static struct smcd_dev *smcd_lo_alloc_dev(const struct smcd_ops *ops,
                                          int max_dmbs)
{
        struct smcd_dev *smcd;

        smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
        if (!smcd)
                return NULL;

        smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
                             GFP_KERNEL);
        if (!smcd->conn)
                goto out_smcd;

        smcd->ops = ops;

        spin_lock_init(&smcd->lock);
        spin_lock_init(&smcd->lgr_lock);
        INIT_LIST_HEAD(&smcd->vlan);
        INIT_LIST_HEAD(&smcd->lgr_list);
        init_waitqueue_head(&smcd->lgrs_deleted);
        return smcd;

out_smcd:
        kfree(smcd);
        return NULL;
}

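/* Create the smcd_dev for the loopback-ism device, mark SMC-Dv2 as
 * available and add the device to the global smcd device list.
 */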
static int smcd_lo_register_dev(struct smc_lo_dev *ldev)
{
        struct smcd_dev *smcd;

        smcd = smcd_lo_alloc_dev(&lo_ops, SMC_LO_MAX_DMBS);
        if (!smcd)
                return -ENOMEM;
        ldev->smcd = smcd;
        smcd->priv = ldev;
        smc_ism_set_v2_capable();
        mutex_lock(&smcd_dev_list.mutex);
        list_add(&smcd->list, &smcd_dev_list.list);
        mutex_unlock(&smcd_dev_list.mutex);
        pr_warn_ratelimited("smc: adding smcd device %s\n",
                            dev_name(&ldev->dev));
        return 0;
}

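/* Tear down the smcd_dev: mark it as going away, terminate all link
 * groups that use it and remove it from the smcd device list.
 */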
static void smcd_lo_unregister_dev(struct smc_lo_dev *ldev)
{
        struct smcd_dev *smcd = ldev->smcd;

        pr_warn_ratelimited("smc: removing smcd device %s\n",
                            dev_name(&ldev->dev));
        smcd->going_away = 1;
        smc_smcd_terminate_all(smcd);
        mutex_lock(&smcd_dev_list.mutex);
        list_del_init(&smcd->list);
        mutex_unlock(&smcd_dev_list.mutex);
        kfree(smcd->conn);
        kfree(smcd);
}

static int smc_lo_dev_init(struct smc_lo_dev *ldev)
{
        smc_lo_generate_ids(ldev);
        rwlock_init(&ldev->dmb_ht_lock);
        hash_init(ldev->dmb_ht);
        atomic_set(&ldev->dmb_cnt, 0);
        init_waitqueue_head(&ldev->ldev_release);

        return smcd_lo_register_dev(ldev);
}

static void smc_lo_dev_exit(struct smc_lo_dev *ldev)
{
        smcd_lo_unregister_dev(ldev);
        if (atomic_read(&ldev->dmb_cnt))
                wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
}

static void smc_lo_dev_release(struct device *dev)
{
        struct smc_lo_dev *ldev =
                container_of(dev, struct smc_lo_dev, dev);

        kfree(ldev);
}

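/* Allocate, initialize and register the single loopback-ism device; on
 * failure the initial device reference is dropped, which frees ldev via
 * smc_lo_dev_release().
 */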
static int smc_lo_dev_probe(void)
{
        struct smc_lo_dev *ldev;
        int ret;

        ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
        if (!ldev)
                return -ENOMEM;

        ldev->dev.parent = NULL;
        ldev->dev.release = smc_lo_dev_release;
        device_initialize(&ldev->dev);
        dev_set_name(&ldev->dev, smc_lo_dev_name);

        ret = smc_lo_dev_init(ldev);
        if (ret)
                goto free_dev;

        lo_dev = ldev; /* global loopback device */
        return 0;

free_dev:
        put_device(&ldev->dev);
        return ret;
}

static void smc_lo_dev_remove(void)
{
        if (!lo_dev)
                return;

        smc_lo_dev_exit(lo_dev);
        put_device(&lo_dev->dev); /* device_initialize in smc_lo_dev_probe */
}

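/* Entry points used by the SMC core (presumably from its module init and
 * exit paths) to create and destroy the loopback-ism device.
 */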
int smc_loopback_init(void)
{
        return smc_lo_dev_probe();
}

void smc_loopback_exit(void)
{
        smc_lo_dev_remove();
}
