// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	struct sk_buff_head send_pkt_queue;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
	bool seqpacket_allow;

	/* These fields are used only in the tx path, in
	 * 'virtio_transport_send_pkt_work()'; they live here to save stack
	 * space in that function. Each pointer in 'out_sgs' points to the
	 * corresponding element of 'out_bufs' - this is initialized in
	 * 'virtio_vsock_probe()'. Both fields are protected by 'tx_lock'.
	 * +1 is needed for the packet header.
	 */
	struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
	struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
};

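/* Return the CID assigned to this guest, or VMADDR_CID_ANY if the device
 * has not been probed yet.
 */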
static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		int ret, in_sg = 0, out_sg = 0;
		struct scatterlist **sgs;
		struct sk_buff *skb;
		bool reply;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;

		reply = virtio_vsock_skb_reply(skb);
		sgs = vsock->out_sgs;
		sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
			    sizeof(*virtio_vsock_hdr(skb)));
		out_sg++;

		if (!skb_is_nonlinear(skb)) {
			if (skb->len > 0) {
				sg_init_one(sgs[out_sg], skb->data, skb->len);
				out_sg++;
			}
		} else {
			struct skb_shared_info *si;
			int i;

			/* If the skb is nonlinear, then its buffer must
			 * contain only the header and nothing more. Data is
			 * stored in the fragments.
			 */
			WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));

			si = skb_shinfo(skb);

			for (i = 0; i < si->nr_frags; i++) {
				skb_frag_t *skb_frag = &si->frags[i];
				void *va;

				/* We use 'page_to_virt()' for the userspace
				 * page here, because the virtio or
				 * dma-mapping layers will call
				 * 'virt_to_phys()' later to fill the buffer
				 * descriptor. We don't touch memory at the
				 * "virtual" address of this page.
				 */
				va = page_to_virt(skb_frag_page(skb_frag));
				sg_init_one(sgs[out_sg],
					    va + skb_frag_off(skb_frag),
					    skb_frag_size(skb_frag));
				out_sg++;
			}
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq.
		 */
		if (ret < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		virtio_transport_deliver_tap_pkt(skb);

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

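/* Queue a packet for the send worker and wake it up. Returns the packet
 * length on success, or -ENODEV if no device is present or the packet is
 * addressed to our own CID.
 */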
static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr;
	struct virtio_vsock *vsock;
	int len = skb->len;

	hdr = virtio_vsock_hdr(skb);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
	rcu_read_unlock();
	return len;
}

static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	int cnt = 0, ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}

static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

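/* Reclaim sk_buffs that the device has finished transmitting, then kick the
 * send worker in case queued packets were waiting for virtqueue space.
 */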
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct sk_buff *skb;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
			consume_skb(skb);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(&virtio_transport.transport,
						virtio_vsock_reset_sock);
		break;
	}
}

static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}

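/* Virtqueue interrupt callbacks: each one only schedules the corresponding
 * worker, so all virtqueue processing happens in process context under the
 * matching lock.
 */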
static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static bool virtio_transport_can_msgzerocopy(int bufs_num)
{
	struct virtio_vsock *vsock;
	bool res = false;

	rcu_read_lock();

	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock) {
		struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];

		/* Check that the tx queue is large enough to hold all the
		 * data to send. This is needed because, when there is not
		 * enough free space in the queue, the current skb is
		 * reinserted at the head of the socket's tx list to retry
		 * transmission later; so if the skb is bigger than the whole
		 * queue, it will be reinserted again and again, blocking
		 * other skbs from being sent. Each page of the user-provided
		 * buffer is added as a single buffer to the tx virtqueue, so
		 * compare the number of pages against the maximum capacity
		 * of the queue.
		 */
		if (bufs_num <= vq->num_max)
			res = true;
	}

	rcu_read_unlock();

	return res;
}

static bool virtio_transport_msgzerocopy_allow(void)
{
	return true;
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module = THIS_MODULE,

		.get_local_cid = virtio_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = virtio_transport_cancel_pkt,

		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow = virtio_transport_seqpacket_allow,
		.seqpacket_has_data = virtio_transport_seqpacket_has_data,

		.msgzerocopy_allow = virtio_transport_msgzerocopy_allow,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size = virtio_transport_notify_buffer_size,
		.notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,

		.read_skb = virtio_transport_read_skb,
	},

	.send_pkt = virtio_transport_send_pkt,
	.can_msgzerocopy = virtio_transport_can_msgzerocopy,
};

static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}

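/* Receive path worker: pass received packets up to the core transport and
 * refill the rx virtqueue when it runs low on buffers.
 */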
static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct sk_buff *skb;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies. Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
				     len > virtio_vsock_skb_len(skb))) {
				kfree_skb(skb);
				continue;
			}

			virtio_vsock_skb_rx_put(skb);
			virtio_transport_deliver_tap_pkt(skb);
			virtio_transport_recv_pkt(&virtio_transport, skb);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct virtqueue_info vqs_info[] = {
		{ "rx", virtio_vsock_rx_done },
		{ "tx", virtio_vsock_tx_done },
		{ "event", virtio_vsock_event_done },
	};
	int ret;

	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL);
	if (ret < 0)
		return ret;

	virtio_vsock_update_guest_cid(vsock);

	virtio_device_ready(vdev);

	return 0;
}

static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
{
	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	/* virtio_transport_send_pkt() can queue packets once
	 * the_virtio_vsock is set, but they won't be processed until
	 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
	 * when initialization finishes to send those packets queued
	 * earlier.
	 * We don't need to queue the other workers (rx, event) because
	 * as long as we don't fill the queues with empty buffers, the
	 * host can't send us any notification.
	 */
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

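/* Tear down the virtqueues: stop the workers, reset the device, and free
 * every buffer still owned by the device or queued for transmission.
 */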
static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct sk_buff *skb;

	/* Reset all connected sockets when the VQs disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
					virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call virtio_reset_device().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	virtio_reset_device(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->tx_lock);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);
}

static int virtio_vsock_probe(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = NULL;
	int ret;
	int i;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	skb_queue_head_init(&vsock->send_pkt_queue);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
		vsock->out_sgs[i] = &vsock->out_bufs[i];

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

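/* Unpublish the device and wait for existing RCU readers to finish before
 * tearing down the virtqueues and freeing the vsock object.
 */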
static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	/* Other works can be queued before 'config->del_vqs()', so we flush
	 * all works before freeing the vsock object to avoid use after free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;
}

static int virtio_vsock_restore(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	int ret;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

out:
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_vsock_freeze,
	.restore = virtio_vsock_restore,
#endif
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);