/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt service API
 *
 * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_ICM_EVENT = 10,
	TB_CFG_PKG_ICM_CMD = 11,
	TB_CFG_PKG_ICM_RESP = 12,
};

/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *			 Thunderbolt dock (and Display Port). All PCIe
 *			 links downstream of the dock are removed.
 * @TB_SECURITY_NOPCIE: For USB4 systems this level is used when the
 *			PCIe tunneling is disabled from the BIOS.
 */
enum tb_security_level {
	TB_SECURITY_NONE,
	TB_SECURITY_USER,
	TB_SECURITY_SECURE,
	TB_SECURITY_DPONLY,
	TB_SECURITY_USBONLY,
	TB_SECURITY_NOPCIE,
};

/**
 * struct tb - main thunderbolt bus structure
 * @dev: Domain device
 * @lock: Big lock. Must be held when accessing any struct
 *	  tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations vector
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @nboot_acl: Number of boot ACLs the domain supports
 * @privdata: Private connection manager specific data
 */
struct tb {
	struct device dev;
	struct mutex lock;
	struct tb_nhi *nhi;
	struct tb_ctl *ctl;
	struct workqueue_struct *wq;
	struct tb_switch *root_switch;
	const struct tb_cm_ops *cm_ops;
	int index;
	enum tb_security_level security_level;
	size_t nboot_acl;
	unsigned long privdata[];
};

extern const struct bus_type tb_bus_type;
extern const struct device_type tb_service_type;
extern const struct device_type tb_xdomain_type;

#define TB_LINKS_PER_PHY_PORT	2

static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
	return (link - 1) / TB_LINKS_PER_PHY_PORT;
}
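
/*
 * For example, with two links per physical port, links 1 and 2 both map
 * to physical port 0 and links 3 and 4 map to physical port 1 (link
 * numbering starts from 1).
 */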

/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 *
 * User needs to provide serialization if needed.
 */
struct tb_property_dir {
	const uuid_t *uuid;
	struct list_head properties;
};

enum tb_property_type {
	TB_PROPERTY_TYPE_UNKNOWN = 0x00,
	TB_PROPERTY_TYPE_DIRECTORY = 0x44,
	TB_PROPERTY_TYPE_DATA = 0x64,
	TB_PROPERTY_TYPE_TEXT = 0x74,
	TB_PROPERTY_TYPE_VALUE = 0x76,
};

#define TB_PROPERTY_KEY_SIZE	8

/**
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key for the property (always terminated).
 * @type: Type of the property
 * @length: Length of the property data in dwords
 * @value: Property value
 *
 * Users use @type to determine which field in @value is filled.
 */
struct tb_property {
	struct list_head list;
	char key[TB_PROPERTY_KEY_SIZE + 1];
	enum tb_property_type type;
	size_t length;
	union {
		struct tb_property_dir *dir;
		u8 *data;
		char *text;
		u32 immediate;
	} value;
};

struct tb_property_dir *tb_property_parse_dir(const u32 *block,
					      size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
			       size_t block_len);
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
			      u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
			 const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
			 const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
			struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
				     const char *key, enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
					 struct tb_property *prev);

#define tb_property_for_each(dir, property)			\
	for (property = tb_property_get_next(dir, NULL);	\
	     property;						\
	     property = tb_property_get_next(dir, property))

int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
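
/*
 * A minimal sketch (not taken from an in-tree driver) of how a service
 * can publish its own properties to remote domains. The directory UUID,
 * the "example" key and the property contents below are placeholders:
 *
 *	static const uuid_t example_dir_uuid =
 *		UUID_INIT(0x9e588f79, 0x478a, 0x4268,
 *			  0x86, 0x89, 0xdb, 0xe3, 0x4e, 0x87, 0x78, 0x55);
 *	struct tb_property_dir *dir;
 *	int ret;
 *
 *	dir = tb_property_create_dir(&example_dir_uuid);
 *	if (!dir)
 *		return -ENOMEM;
 *
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	tb_property_add_immediate(dir, "prtcvers", 1);
 *	tb_property_add_text(dir, "device", "Example service");
 *
 *	ret = tb_register_property_dir("example", dir);
 *	if (ret)
 *		tb_property_free_dir(dir);
 *	return ret;
 *
 * The directory is removed again with tb_unregister_property_dir() and
 * then released with tb_property_free_dir().
 */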

/**
 * enum tb_link_width - Thunderbolt/USB4 link width
 * @TB_LINK_WIDTH_SINGLE: Single lane link
 * @TB_LINK_WIDTH_DUAL: Dual lane symmetric link
 * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters
 * @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers
 */
enum tb_link_width {
	TB_LINK_WIDTH_SINGLE = BIT(0),
	TB_LINK_WIDTH_DUAL = BIT(1),
	TB_LINK_WIDTH_ASYM_TX = BIT(2),
	TB_LINK_WIDTH_ASYM_RX = BIT(3),
};

/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote domain (host)
 * @local_uuid: Cached local UUID
 * @route: Route string used to reach the other domain
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @local_max_hopid: Maximum input HopID of this host
 * @remote_max_hopid: Maximum input HopID of the remote host
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the downstream facing link
 * @link_usb4: Downstream link is USB4
 * @is_unplugged: The XDomain is unplugged
 * @needs_uuid: If the XDomain does not have @remote_uuid it will be
 *		queried first
 * @service_ids: Used to generate IDs for the services
 * @in_hopids: Input HopIDs for DMA tunneling
 * @out_hopids: Output HopIDs for DMA tunneling
 * @local_property_block: Local block of properties
 * @local_property_block_gen: Generation of @local_property_block
 * @local_property_block_len: Length of the @local_property_block in dwords
 * @remote_properties: Properties exported by the remote domain
 * @remote_property_block_gen: Generation of @remote_properties
 * @state: Next XDomain discovery state to run
 * @state_work: Work used to run the next state
 * @state_retries: Number of retries remaining for the state
 * @properties_changed_work: Work used to notify the remote domain that
 *			     our properties have changed
 * @properties_changed_retries: Number of times left to send properties
 *				changed notification
 * @bonding_possible: True if lane bonding is possible on the local side
 * @target_link_width: Target link width from the remote host
 * @link: Root switch link the remote domain is connected to (ICM only)
 * @depth: Depth in the chain where the remote domain is connected (ICM only)
 *
 * This structure represents a connection between two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 *
 * Service drivers may access this structure if they need to enumerate
 * non-standard properties but they need to hold @lock when doing so
 * because properties can be changed asynchronously in response to
 * changes in the remote domain.
 */
struct tb_xdomain {
	struct device dev;
	struct tb *tb;
	uuid_t *remote_uuid;
	const uuid_t *local_uuid;
	u64 route;
	u16 vendor;
	u16 device;
	unsigned int local_max_hopid;
	unsigned int remote_max_hopid;
	struct mutex lock;
	const char *vendor_name;
	const char *device_name;
	unsigned int link_speed;
	enum tb_link_width link_width;
	bool link_usb4;
	bool is_unplugged;
	bool needs_uuid;
	struct ida service_ids;
	struct ida in_hopids;
	struct ida out_hopids;
	u32 *local_property_block;
	u32 local_property_block_gen;
	u32 local_property_block_len;
	struct tb_property_dir *remote_properties;
	u32 remote_property_block_gen;
	int state;
	struct delayed_work state_work;
	int state_retries;
	struct delayed_work properties_changed_work;
	int properties_changed_retries;
	bool bonding_possible;
	u8 target_link_width;
	u8 link;
	u8 depth;
};

int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
			    int transmit_ring, int receive_path,
			    int receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
			     int transmit_ring, int receive_path,
			     int receive_ring);

static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)
{
	return tb_xdomain_disable_paths(xd, -1, -1, -1, -1);
}

struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);

static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_uuid(tb, uuid);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *
tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
	if (xd)
		get_device(&xd->dev);
	return xd;
}

static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
	if (xd)
		put_device(&xd->dev);
}

static inline bool tb_is_xdomain(const struct device *dev)
{
	return dev->type == &tb_xdomain_type;
}

static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
	if (tb_is_xdomain(dev))
		return container_of(dev, struct tb_xdomain, dev);
	return NULL;
}

int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec);
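
/*
 * A rough sketch (error handling omitted) of how a service driver could
 * set up DMA tunneling to the remote domain. The input and output HopIDs
 * are allocated locally and then agreed with the remote side over the
 * service's own protocol before the paths are enabled; tx_ring and
 * rx_ring refer to rings allocated with tb_ring_alloc_tx() and
 * tb_ring_alloc_rx() declared later in this header:
 *
 *	int in_hopid, out_hopid;
 *
 *	in_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
 *	out_hopid = tb_xdomain_alloc_out_hopid(xd, -1);
 *
 *	... agree on the HopIDs with the remote host ...
 *
 *	tb_xdomain_enable_paths(xd, out_hopid, tx_ring->hop,
 *				in_hopid, rx_ring->hop);
 *
 * The tunnel is torn down with tb_xdomain_disable_paths() (or
 * tb_xdomain_disable_all_paths()) and the HopIDs are returned with
 * tb_xdomain_release_in_hopid()/tb_xdomain_release_out_hopid().
 */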

/**
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *	      here tells the XDomain core that the message was handled
 *	      by this handler and should not be forwarded to other
 *	      handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. The only limitation is that the
 * XDomain discovery protocol UUID cannot be registered since it is
 * handled by the core XDomain code.
 *
 * The @callback must check that the message is really directed to the
 * service the driver implements.
 */
struct tb_protocol_handler {
	const uuid_t *uuid;
	int (*callback)(const void *buf, size_t size, void *data);
	void *data;
	struct list_head list;
};

int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
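
/*
 * A minimal sketch of registering a handler for a service protocol. The
 * UUID and the helper functions are placeholders; the callback must
 * verify the message before claiming it:
 *
 *	static int example_handle_msg(const void *buf, size_t size, void *data)
 *	{
 *		if (!example_msg_is_for_us(buf, size))
 *			return 0;
 *		example_process_msg(buf, size, data);
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler example_handler = {
 *		.uuid = &example_protocol_uuid,
 *		.callback = example_handle_msg,
 *	};
 *
 *	tb_register_protocol_handler(&example_handler);
 *
 * The handler is removed with tb_unregister_protocol_handler().
 */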

/**
 * struct tb_service - Thunderbolt service
 * @dev: Service device
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory
 * @prtcid: Protocol ID from the properties directory
 * @prtcvers: Protocol version from the properties directory
 * @prtcrevs: Protocol software revision from the properties directory
 * @prtcstns: Protocol settings mask from the properties directory
 * @debugfs_dir: Pointer to the service debugfs directory. Always created
 *		 when debugfs is enabled. Can be used by service drivers to
 *		 add their own entries under the service.
 *
 * Each domain exposes a set of services it supports as a collection of
 * properties. For each service there will be one corresponding
 * &struct tb_service. Service drivers are bound to these.
 */
struct tb_service {
	struct device dev;
	int id;
	const char *key;
	u32 prtcid;
	u32 prtcvers;
	u32 prtcrevs;
	u32 prtcstns;
	struct dentry *debugfs_dir;
};

static inline struct tb_service *tb_service_get(struct tb_service *svc)
{
	if (svc)
		get_device(&svc->dev);
	return svc;
}

static inline void tb_service_put(struct tb_service *svc)
{
	if (svc)
		put_device(&svc->dev);
}

static inline bool tb_is_service(const struct device *dev)
{
	return dev->type == &tb_service_type;
}

static inline struct tb_service *tb_to_service(struct device *dev)
{
	if (tb_is_service(dev))
		return container_of(dev, struct tb_service, dev);
	return NULL;
}

/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)
 * @shutdown: Called at shutdown time to stop the service (optional)
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
	struct device_driver driver;
	int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
	void (*remove)(struct tb_service *svc);
	void (*shutdown)(struct tb_service *svc);
	const struct tb_service_id *id_table;
};

#define TB_SERVICE(key, id)				\
	.match_flags = TBSVC_MATCH_PROTOCOL_KEY |	\
		       TBSVC_MATCH_PROTOCOL_ID,		\
	.protocol_key = (key),				\
	.protocol_id = (id)

int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);

static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
	return dev_get_drvdata(&svc->dev);
}

static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
	dev_set_drvdata(&svc->dev, data);
}

static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
	return tb_to_xdomain(svc->dev.parent);
}
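
/*
 * A skeleton of a service driver binding to services that advertise a
 * given protocol key/ID pair. The "exmpl" key, the ID and the probe and
 * remove functions are placeholders:
 *
 *	static const struct tb_service_id example_ids[] = {
 *		{ TB_SERVICE("exmpl", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, example_ids);
 *
 *	static struct tb_service_driver example_driver = {
 *		.driver = {
 *			.owner = THIS_MODULE,
 *			.name = "example",
 *		},
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.id_table = example_ids,
 *	};
 *
 *	module_driver(example_driver, tb_register_service_driver,
 *		      tb_unregister_service_driver);
 *
 * In its probe callback the driver typically stores private data with
 * tb_service_set_drvdata() and reaches the parent XDomain with
 * tb_service_parent().
 */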

/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @ops: NHI specific optional ops
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @iommu_dma_protection: An IOMMU will isolate external-facing ports.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 * @quirks: NHI specific quirks if any
 */
struct tb_nhi {
	spinlock_t lock;
	struct pci_dev *pdev;
	const struct tb_nhi_ops *ops;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	bool iommu_dma_protection;
	struct work_struct interrupt_work;
	u32 hop_count;
	unsigned long quirks;
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of the descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to
 *		RX ring. For TX ring this should be set to %0.
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start
 *		polling. Passing %NULL keeps the ring in interrupt mode.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
	spinlock_t lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	int e2e_tx_hop;
	u16 sof_mask;
	u16 eof_mask;
	void (*start_poll)(void *data);
	void *poll_data;
};

/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME		BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E		BIT(2)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,
	RING_DESC_CRC_ERROR = 0x1,
	RING_DESC_COMPLETED = 0x2,
	RING_DESC_POSTED = 0x4,
	RING_DESC_BUFFER_OVERRUN = 0x04,
	RING_DESC_INTERRUPT = 0x8,
};

/**
 * struct ring_frame - For use with tb_ring_rx()/tb_ring_tx()
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};

/* Minimum size for tb_ring_rx() */
#define TB_FRAME_SIZE		0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, int e2e_tx_hop,
				 u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
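
/*
 * A rough sketch of allocating a pair of rings in frame mode. Passing
 * %-1 as the hop lets the NHI pick a free one; the ring size, SoF/EoF
 * PDF masks and E2E settings are protocol specific, so the values below
 * are only placeholders:
 *
 *	struct tb_ring *tx_ring, *rx_ring;
 *
 *	tx_ring = tb_ring_alloc_tx(nhi, -1, 64, RING_FLAG_FRAME);
 *	rx_ring = tb_ring_alloc_rx(nhi, -1, 64, RING_FLAG_FRAME, 0,
 *				   0xffff, 0xffff, NULL, NULL);
 *	if (!tx_ring || !rx_ring)
 *		goto err_free;
 *
 *	tb_ring_start(tx_ring);
 *	tb_ring_start(rx_ring);
 *
 * Frames are then queued with tb_ring_tx()/tb_ring_rx() below, with the
 * buffers DMA mapped against tb_ring_dma_device(). Before releasing the
 * rings with tb_ring_free(), stop them with tb_ring_stop().
 */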

/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy has to be set. The buffer must contain at least
 * %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy, @frame->size, @frame->eof and @frame->sof have to
 * be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

/* Used only when the ring is in polling mode */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);

/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
	return &ring->nhi->pdev->dev;
}

#endif /* THUNDERBOLT_H_ */