/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_dma_cookie;
struct iommu_fault_param;

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types, can be expanded for IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_PAGE_REQ = 1,	/* page request fault */
};

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
 *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
 *         must have the same PASID value as the page request. When it is clear,
 *         the page response should not have a PASID.
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 2)
	u32	flags;
	u32	pasid;
	u32	grpid;
	u32	perm;
	u64	addr;
	u64	private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 */
struct iommu_fault {
	u32 type;
	struct iommu_fault_page_request prm;
};

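/*
 * Example: how an IOMMU driver might report a page request to the core. A
 * hedged sketch, not taken from any particular driver; "dev", "pasid",
 * "grpid" and "iova" are assumed, and it uses the iopf_fault /
 * iommu_report_device_fault() API declared later in this header (under
 * CONFIG_IOMMU_IOPF):
 *
 *	struct iopf_fault evt = {
 *		.fault = {
 *			.type = IOMMU_FAULT_PAGE_REQ,
 *			.prm = {
 *				.flags = IOMMU_FAULT_PAGE_REQUEST_PASID_VALID |
 *					 IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
 *				.pasid = pasid,
 *				.grpid = grpid,
 *				.perm  = IOMMU_FAULT_PERM_READ,
 *				.addr  = iova,
 *			},
 *		},
 *	};
 *
 *	iommu_report_device_fault(dev, &evt);
 */
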
/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
	u32	pasid;
	u32	grpid;
	u32	code;
};

struct iopf_fault {
	struct iommu_fault fault;
	/* node for pending lists */
	struct list_head list;
};

struct iopf_group {
	struct iopf_fault last_fault;
	struct list_head faults;
	size_t fault_count;
	/* list node for iommu_fault_param::faults */
	struct list_head pending_node;
	struct work_struct work;
	struct iommu_attach_handle *attach_handle;
	/* The device's fault data parameter. */
	struct iommu_fault_param *fault_param;
	/* Used by handler provider to hook the group on its own lists. */
	struct list_head node;
	u32 cookie;
};

/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */
struct iopf_queue {
	struct workqueue_struct *wq;
	struct list_head devices;
	struct mutex lock;
};

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
#define __IOMMU_DOMAIN_PLATFORM	(1U << 5)

#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
					      on a stage-2 translation        */

#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that do their own
 *				  dma_api stuff. Do not use in new drivers.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
#define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

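/*
 * Example: the typical life cycle of an UNMANAGED/paging domain as seen by an
 * IOMMU-API user such as VFIO. A hedged sketch only; error handling is
 * abbreviated and "dev", "iova" and "paddr" are assumed to exist:
 *
 *	struct iommu_domain *domain = iommu_paging_domain_alloc(dev);
 *
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE,
 *		  GFP_KERNEL);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */
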
struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	const struct iommu_dirty_ops *dirty_ops;
	const struct iommu_ops *owner; /* Whose domain_alloc we came from */
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	int (*iopf_handler)(struct iopf_group *group);
	void *fault_data;
	union {
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
			/*
			 * Next iommu_domain in mm->iommu_mm->sva_domains list
			 * protected by iommu_sva_lock.
			 */
			struct list_head next;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
};

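/*
 * Example: gating an optional mapping attribute on a capability. A hedged
 * sketch; "dev", "domain", "iova" and "paddr" are assumed:
 *
 *	int prot = IOMMU_READ | IOMMU_WRITE;
 *
 *	if (device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
 *		prot |= IOMMU_CACHE;
 *	iommu_map(domain, iova, paddr, SZ_4K, prot, GFP_KERNEL);
 */
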
/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use cases (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_NO_PASID			(0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* Starting range for allocation */
#define IOMMU_PASID_INVALID		(-1U)
typedef unsigned int ioasid_t;

/* Read but do not clear any dirty bits */
#define IOMMU_DIRTY_NO_CLEAR (1 << 0)

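/*
 * Example: the enable/disable ordering that the IOMMU_DEV_FEAT_* kernel-doc
 * above requires. A hedged sketch for a device driver that wants SVA with
 * IOMMU-handled I/O page faults; "dev" is assumed:
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF))
 *		return -ENODEV;
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 *		return -ENODEV;
 *	}
 *	...
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 */
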
#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

/**
 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
 * @bitmap: IOVA bitmap
 * @gather: Range information for a pending IOTLB flush
 */
struct iommu_dirty_bitmap {
	struct iova_bitmap *bitmap;
	struct iommu_iotlb_gather *gather;
};

/**
 * struct iommu_dirty_ops - domain specific dirty tracking operations
 * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
 * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
 *                        into a bitmap, with a bit represented as a page.
 *                        Reads the dirty PTE bits and clears them from the IO
 *                        page tables.
 */
struct iommu_dirty_ops {
	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
	int (*read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty);
};

/**
 * struct iommu_user_data - iommu driver specific user space data info
 * @type: The data type of the user buffer
 * @uptr: Pointer to the user buffer for copy_from_user()
 * @len: The length of the user buffer in bytes
 *
 * The user buffer is a uAPI structure defined in include/uapi/linux/iommufd.h;
 * @type, @uptr and @len should be just copied from an iommufd core uAPI struct.
 */
struct iommu_user_data {
	unsigned int type;
	void __user *uptr;
	size_t len;
};

/**
 * struct iommu_user_data_array - iommu driver specific user space data array
 * @type: The data type of all the entries in the user buffer array
 * @uptr: Pointer to the user buffer array
 * @entry_len: The fixed-width length of an entry in the array, in bytes
 * @entry_num: The number of total entries in the array
 *
 * The user buffer includes an array of requests with format defined in
 * include/uapi/linux/iommufd.h
 */
struct iommu_user_data_array {
	unsigned int type;
	void __user *uptr;
	size_t entry_len;
	u32 entry_num;
};

/**
 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @dst_data. Must match with @src_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user(
	void *dst_data, const struct iommu_user_data *src_data,
	unsigned int data_type, size_t data_len, size_t min_len)
{
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	if (src_data->type != data_type)
		return -EINVAL;
	if (src_data->len < min_len || data_len < src_data->len)
		return -EINVAL;
	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
				     src_data->len);
}

/**
 * iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @kdst. Must match with @user_data->type
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
				      sizeof(*kdst),                      \
				      offsetofend(typeof(*kdst), min_last))

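/*
 * Example: how a driver's @domain_alloc_user implementation might use the
 * helper above. A hedged sketch; "struct iommu_hwpt_vtd_s1" and its
 * "__reserved" member stand in for whatever uAPI structure the driver
 * actually defines in include/uapi/linux/iommufd.h:
 *
 *	struct iommu_hwpt_vtd_s1 vtd;
 *	int ret;
 *
 *	ret = iommu_copy_struct_from_user(&vtd, user_data,
 *					  IOMMU_HWPT_DATA_VTD_S1, __reserved);
 *	if (ret)
 *		return ERR_PTR(ret);
 */
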
/**
 * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                       data from an iommu_user_data_array
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_array: Pointer to a struct iommu_user_data_array for a user space array
 * @data_type: The data type of the @dst_data. Must match with @src_array.type
 * @index: Index to the location in the array to copy user data from
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user_array(
	void *dst_data, const struct iommu_user_data_array *src_array,
	unsigned int data_type, unsigned int index, size_t data_len,
	size_t min_len)
{
	struct iommu_user_data src_data;

	if (WARN_ON(!src_array || index >= src_array->entry_num))
		return -EINVAL;
	if (!src_array->entry_num)
		return -EINVAL;
	src_data.uptr = src_array->uptr + src_array->entry_len * index;
	src_data.len = src_array->entry_len;
	src_data.type = src_array->type;

	return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
					     data_len, min_len);
}

/**
 * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                     data from an iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 * @index: Index to the location in the array to copy user data from
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
					  min_last)                            \
	__iommu_copy_struct_from_user_array(                                   \
		kdst, user_array, data_type, index, sizeof(*(kdst)),           \
		offsetofend(typeof(*(kdst)), min_last))

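/*
 * Example: how a nesting driver's @cache_invalidate_user op might consume the
 * array helper above. A hedged sketch; "struct iommu_hwpt_vtd_s1_invalidate"
 * and its "inv_error" member stand in for the driver's real invalidation uAPI
 * structure, and error handling is abbreviated:
 *
 *	u32 index;
 *
 *	for (index = 0; index < array->entry_num; index++) {
 *		struct iommu_hwpt_vtd_s1_invalidate inv;
 *
 *		if (iommu_copy_struct_from_user_array(&inv, array,
 *				IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
 *				index, inv_error))
 *			break;
 *		// ... perform the invalidation described by "inv" ...
 *	}
 *	array->entry_num = index;	// report how many were handled
 */
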
/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @hw_info: report iommu hardware information. The data buffer returned by this
 *           op is allocated in the iommu driver and freed by the caller after
 *           use. The information type is one of enum iommu_hw_info_type defined
 *           in include/uapi/linux/iommufd.h.
 * @domain_alloc: allocate and return an iommu domain if success. Otherwise
 *                NULL is returned. The domain is not fully initialized until
 *                iommu_domain_alloc() returns to the caller.
 * @domain_alloc_user: Allocate an iommu domain corresponding to the input
 *                     parameters as defined in include/uapi/linux/iommufd.h.
 *                     Unlike @domain_alloc, it is called only by IOMMUFD and
 *                     must fully initialize the new domain before return.
 *                     Upon success, if the @user_data is valid and the @parent
 *                     points to a kernel-managed domain, the new domain must be
 *                     IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
 *                     NULL while the @user_data can be optionally provided, the
 *                     new domain must support __IOMMU_DOMAIN_PAGING.
 *                     Upon failure, ERR_PTR must be returned.
 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
 *                       UNMANAGED, DMA, and DMA_FQ domain types.
 * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                               iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @remove_dev_pasid: Remove any translation configurations of a specific
 *                    pasid, so that any DMA transactions with this pasid
 *                    will be blocked by the hardware.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 * @identity_domain: An always available, always attachable identity
 *                   translation.
 * @blocked_domain: An always available, always attachable blocking
 *                  translation.
 * @release_domain: Optional domain that devices are attached to while they are
 *                  being released.
 * @default_domain: If not NULL this will always be set as the default domain.
 *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
 *                  Do not use in new drivers.
 * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is
 *                    no user domain for each PASID and the I/O page faults are
 *                    forwarded through the user domain attached to the device
 *                    RID.
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);
	void *(*hw_info)(struct device *dev, u32 *length, u32 *type);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	struct iommu_domain *(*domain_alloc_user)(
		struct device *dev, u32 flags, struct iommu_domain *parent,
		const struct iommu_user_data *user_data);
	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
						 struct mm_struct *mm);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	void (*page_response)(struct device *dev, struct iopf_fault *evt,
			      struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);
	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid,
				 struct iommu_domain *domain);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
	struct iommu_domain *identity_domain;
	struct iommu_domain *blocked_domain;
	struct iommu_domain *release_domain;
	struct iommu_domain *default_domain;
	u8 user_pasid_table:1;
};

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set an iommu domain to a pasid of device
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map_pages to the
 *                  hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @cache_invalidate_user: Flush hardware cache for user space IO page table.
 *                         The @domain must be IOMMU_DOMAIN_NESTED. The @array
 *                         passes in the cache invalidation requests, in form
 *                         of a driver data structure. The driver must update
 *                         array->entry_num to report the number of handled
 *                         invalidation requests. The driver data structure
 *                         must be defined in include/uapi/linux/iommufd.h
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid);

	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	int (*cache_invalidate_user)(struct iommu_domain *domain,
				     struct iommu_user_data_array *array);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

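/*
 * Example: the shape of a minimal paging-domain implementation. A hedged
 * sketch only; the my_*() helpers are hypothetical stand-ins for a real
 * driver's page-table and TLB code:
 *
 *	static const struct iommu_domain_ops my_domain_ops = {
 *		.attach_dev	 = my_attach_dev,
 *		.map_pages	 = my_map_pages,
 *		.unmap_pages	 = my_unmap_pages,
 *		.iotlb_sync	 = my_iotlb_sync,
 *		.flush_iotlb_all = my_flush_iotlb_all,
 *		.iova_to_phys	 = my_iova_to_phys,
 *		.free		 = my_domain_free,
 *	};
 */
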
/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this IOMMU instance
 * @dev: struct device for sysfs handling
 * @singleton_group: Used internally for drivers that have only one group
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	struct iommu_group *singleton_group;
	u32 max_pasids;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @lock: protect pending faults list
 * @users: user counter to manage the lifetime of the data
 * @rcu: rcu head for kfree_rcu()
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: index into queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 * @faults: holds the pending faults which need response
 */
struct iommu_fault_param {
	struct mutex lock;
	refcount_t users;
	struct rcu_head rcu;

	struct device *dev;
	struct iopf_queue *queue;
	struct list_head queue_list;

	struct list_head partial;
	struct list_head faults;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 * @max_pasids:  number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param __rcu	*fault_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
	u32				max_pasids;
	u32				attach_deferred:1;
	u32				pci_32bit_workaround:1;
	u32				require_direct:1;
	u32				shadow_on_flush:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

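/*
 * Example: how an IOMMU driver's probe path typically publishes an instance
 * to the core. A hedged sketch; "struct my_iommu", my_iommu_ops and "ioaddr"
 * are hypothetical:
 *
 *	struct my_iommu *smmu = ...;
 *	int ret;
 *
 *	ret = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
 *				     "my-iommu.%pa", &ioaddr);
 *	if (ret)
 *		return ret;
 *	ret = iommu_device_register(&smmu->iommu, &my_iommu_ops, smmu->dev);
 *	if (ret)
 *		iommu_device_sysfs_remove(&smmu->iommu);
 */
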
/**
 * iommu_get_iommu_dev - Get iommu_device for a device
 * @dev: an end-point device
 *
 * Note that this function must be called from the iommu_ops
 * to retrieve the iommu_device for a device, as the core code
 * guarantees it will not invoke the op without an attached iommu.
 */
static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
{
	return dev->iommu->iommu_dev;
}

#define iommu_get_iommu_dev(dev, type, member) \
	 container_of(__iommu_get_iommu_dev(dev), type, member)

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

extern int bus_iommu_probe(const struct bus_type *bus);
extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
struct iommu_domain *iommu_paging_domain_alloc(struct device *dev);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));

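/*
 * Example: batching TLB invalidations with iommu_unmap_fast() instead of
 * paying one sync per iommu_unmap() call. A hedged sketch; "domain", "iovas"
 * and "n" are assumed:
 *
 *	struct iommu_iotlb_gather gather;
 *	size_t i;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	for (i = 0; i < n; i++)
 *		iommu_unmap_fast(domain, iovas[i], SZ_4K, &gather);
 *	iommu_iotlb_sync(domain, &gather);	// one flush for the batch
 */
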
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
	if (gather)
		iommu_iotlb_gather_init(gather);

	dirty->bitmap = bitmap;
	dirty->gather = gather;
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
	if (dirty->bitmap)
		iova_bitmap_set(dirty->bitmap, iova, length);

	if (dirty->gather)
		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}

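/*
 * Example: the shape of a driver's @read_and_clear_dirty walk using the
 * helper above. A hedged sketch; my_pte_size() and
 * my_pte_test_and_clear_dirty() are hypothetical page-table accessors:
 *
 *	unsigned long end = iova + size;
 *
 *	while (iova < end) {
 *		size_t pgsize = my_pte_size(domain, iova);
 *
 *		if (my_pte_test_and_clear_dirty(domain, iova,
 *				flags & IOMMU_DIRTY_NO_CLEAR))
 *			iommu_dirty_bitmap_record(dirty, iova, pgsize);
 *		iova += pgsize;
 *	}
 */
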
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
extern struct iommu_group *generic_single_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this
 * structure should be considered private to the IOMMU device driver and are
 * not to be used directly by IOMMU consumers.
 */
struct iommu_fwspec {
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/*
 * An iommu attach handle represents a relationship between an iommu domain
 * and a PASID or RID of a device. It is allocated and managed by the component
 * that manages the domain and is stored in the iommu group during the time the
 * domain is attached.
 */
struct iommu_attach_handle {
	struct iommu_domain		*domain;
};

/**
 * struct iommu_sva - handle to a device-mm bond
 * @handle: iommu attach handle for this bond
 * @dev: the device bound to the mm
 * @users: reference count of iommu_sva_bind_device() callers
 */
struct iommu_sva {
	struct iommu_attach_handle	handle;
	struct device			*dev;
	refcount_t			users;
};

struct iommu_mm_data {
	u32			pasid;
	struct list_head	sva_domains;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

void dev_iommu_priv_set(struct device *dev, void *priv);

extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid,
			      struct iommu_attach_handle *handle);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);

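/*
 * Example: giving one PASID of a device its own address space. A hedged
 * sketch; "domain" is assumed to be an UNMANAGED domain, "pasid" to come from
 * iommu_alloc_global_pasid() or a driver-local allocator, and passing a NULL
 * handle is assumed acceptable when the caller does not need one:
 *
 *	ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_detach_device_pasid(domain, dev, pasid);
 */
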
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
struct iommu_dirty_bitmap {};
struct iommu_dirty_ops {};

static inline bool iommu_present(const struct bus_type *bus)
{
	return false;
}

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, const u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid,
					    struct iommu_attach_handle *handle)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	return IOMMU_PASID_INVALID;
}

static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
void iommu_group_mutex_assert(struct device *dev);
#else
static inline void iommu_group_mutex_assert(struct device *dev)
{
}
#endif

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain:	The IOMMU domain to perform the mapping
 * @iova:	The start address to map the buffer
 * @sgt:	The sg_table object describing the buffer
 * @prot:	IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}

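/*
 * Example: mapping a whole DMA buffer in one call. A hedged sketch; "sgt" is
 * assumed to be an sg_table that already describes the pinned pages of the
 * buffer, and "domain"/"iova" to exist:
 *
 *	ssize_t mapped;
 *
 *	mapped = iommu_map_sgtable(domain, iova, &sgt,
 *				   IOMMU_READ | IOMMU_WRITE);
 *	if (mapped < 0)
 *		return mapped;
 *	// ... DMA ...
 *	iommu_unmap(domain, iova, mapped);
 */
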
#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>

int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

#else /* CONFIG_IOMMU_DMA */

struct msi_desc;
struct msi_msg;

static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}

static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	return 0;
}

static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

#endif	/* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly
 * programmed into some registers. These are always paired with a Tegra SMMU or
 * ARM SMMU, for which the contents of the struct iommu_fwspec are known. Use
 * this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}

#ifdef CONFIG_IOMMU_MM_DATA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	/*
	 * During dup_mm(), a new mm will be memcpy'd from an old one and that
	 * makes the new mm and the old one point to the same iommu_mm
	 * instance. When either one of the two mms gets released, the iommu_mm
	 * instance is freed, leaving the other mm running into a
	 * use-after-free/double-free problem. To avoid the problem, zeroing
	 * the iommu_mm pointer of a new mm is needed here.
	 */
	mm->iommu_mm = NULL;
}

static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return READ_ONCE(mm->iommu_mm);
}

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);

	if (!iommu_mm)
		return IOMMU_PASID_INVALID;
	return iommu_mm->pasid;
}

void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);

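/*
 * Example: binding a process address space to a device for SVA. A hedged
 * sketch; the device is assumed to already have IOMMU_DEV_FEAT_SVA enabled
 * (see the feature-ordering example earlier in this header):
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	// ... program "pasid" into the device and issue DMA with it ...
 *	iommu_sva_unbind_device(handle);
 */
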
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_MM_DATA */

#ifdef CONFIG_IOMMU_IOPF
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group);
void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group,
			 enum iommu_page_response_code status);
#else
static inline int
iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	return -ENODEV;
}

static inline void
iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
}

static inline int iopf_queue_flush_dev(struct device *dev)
{
	return -ENODEV;
}

static inline struct iopf_queue *iopf_queue_alloc(const char *name)
{
	return NULL;
}

static inline void iopf_queue_free(struct iopf_queue *queue)
{
}

static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	return -ENODEV;
}

static inline void iopf_free_group(struct iopf_group *group)
{
}

static inline void
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
}

static inline void iopf_group_response(struct iopf_group *group,
				       enum iommu_page_response_code status)
{
}
#endif /* CONFIG_IOMMU_IOPF */
#endif /* __LINUX_IOMMU_H */