1 /* 1 /* 2 * Copyright © 2014-2018 Broadcom 2 * Copyright © 2014-2018 Broadcom 3 * 3 * 4 * Permission is hereby granted, free of charg 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated docume 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, pub 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to 9 * Software is furnished to do so, subject to the following conditions: 10 * 10 * 11 * The above copyright notice and this permiss 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 13 * Software. 14 * 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT W 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE W 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINF 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFT 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE. 
22 */ 22 */ 23 23 24 #ifndef _V3D_DRM_H_ 24 #ifndef _V3D_DRM_H_ 25 #define _V3D_DRM_H_ 25 #define _V3D_DRM_H_ 26 26 27 #include "drm.h" 27 #include "drm.h" 28 28 29 #if defined(__cplusplus) 29 #if defined(__cplusplus) 30 extern "C" { 30 extern "C" { 31 #endif 31 #endif 32 32 33 #define DRM_V3D_SUBMIT_CL 33 #define DRM_V3D_SUBMIT_CL 0x00 34 #define DRM_V3D_WAIT_BO 34 #define DRM_V3D_WAIT_BO 0x01 35 #define DRM_V3D_CREATE_BO 35 #define DRM_V3D_CREATE_BO 0x02 36 #define DRM_V3D_MMAP_BO 36 #define DRM_V3D_MMAP_BO 0x03 37 #define DRM_V3D_GET_PARAM 37 #define DRM_V3D_GET_PARAM 0x04 38 #define DRM_V3D_GET_BO_OFFSET 38 #define DRM_V3D_GET_BO_OFFSET 0x05 39 #define DRM_V3D_SUBMIT_TFU 39 #define DRM_V3D_SUBMIT_TFU 0x06 40 #define DRM_V3D_SUBMIT_CSD 40 #define DRM_V3D_SUBMIT_CSD 0x07 41 #define DRM_V3D_PERFMON_CREATE 41 #define DRM_V3D_PERFMON_CREATE 0x08 42 #define DRM_V3D_PERFMON_DESTROY 42 #define DRM_V3D_PERFMON_DESTROY 0x09 43 #define DRM_V3D_PERFMON_GET_VALUES 43 #define DRM_V3D_PERFMON_GET_VALUES 0x0a 44 #define DRM_V3D_SUBMIT_CPU 44 #define DRM_V3D_SUBMIT_CPU 0x0b 45 #define DRM_V3D_PERFMON_GET_COUNTER 45 #define DRM_V3D_PERFMON_GET_COUNTER 0x0c 46 46 47 #define DRM_IOCTL_V3D_SUBMIT_CL DRM_ 47 #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) 48 #define DRM_IOCTL_V3D_WAIT_BO DRM_ 48 #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) 49 #define DRM_IOCTL_V3D_CREATE_BO DRM_ 49 #define DRM_IOCTL_V3D_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo) 50 #define DRM_IOCTL_V3D_MMAP_BO DRM_ 50 #define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo) 51 #define DRM_IOCTL_V3D_GET_PARAM DRM_ 51 #define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param) 52 #define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_ 52 #define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE 
+ DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset) 53 #define DRM_IOCTL_V3D_SUBMIT_TFU DRM_ 53 #define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu) 54 #define DRM_IOCTL_V3D_SUBMIT_CSD DRM_ 54 #define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd) 55 #define DRM_IOCTL_V3D_PERFMON_CREATE DRM_ 55 #define DRM_IOCTL_V3D_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_CREATE, \ 56 56 struct drm_v3d_perfmon_create) 57 #define DRM_IOCTL_V3D_PERFMON_DESTROY DRM_ 57 #define DRM_IOCTL_V3D_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_DESTROY, \ 58 58 struct drm_v3d_perfmon_destroy) 59 #define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_ 59 #define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \ 60 60 struct drm_v3d_perfmon_get_values) 61 #define DRM_IOCTL_V3D_SUBMIT_CPU DRM_ 61 #define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu) 62 #define DRM_IOCTL_V3D_PERFMON_GET_COUNTER DRM_ 62 #define DRM_IOCTL_V3D_PERFMON_GET_COUNTER DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_COUNTER, \ 63 63 struct drm_v3d_perfmon_get_counter) 64 64 65 #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 65 #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01 66 #define DRM_V3D_SUBMIT_EXTENSION 66 #define DRM_V3D_SUBMIT_EXTENSION 0x02 67 67 68 /* struct drm_v3d_extension - ioctl extensions 68 /* struct drm_v3d_extension - ioctl extensions 69 * 69 * 70 * Linked-list of generic extensions where the 70 * Linked-list of generic extensions where the id identify which struct is 71 * pointed by ext_data. Therefore, DRM_V3D_EXT 71 * pointed by ext_data. Therefore, DRM_V3D_EXT_ID_* is used on id to identify 72 * the extension type. 72 * the extension type. 
73 */ 73 */ 74 struct drm_v3d_extension { 74 struct drm_v3d_extension { 75 __u64 next; 75 __u64 next; 76 __u32 id; 76 __u32 id; 77 #define DRM_V3D_EXT_ID_MULTI_SYNC 77 #define DRM_V3D_EXT_ID_MULTI_SYNC 0x01 78 #define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 78 #define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02 79 #define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 79 #define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03 80 #define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUE 80 #define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04 81 #define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUER 81 #define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05 82 #define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_Q 82 #define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06 83 #define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QU 83 #define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07 84 __u32 flags; /* mbz */ 84 __u32 flags; /* mbz */ 85 }; 85 }; 86 86 87 /* struct drm_v3d_sem - wait/signal semaphore 87 /* struct drm_v3d_sem - wait/signal semaphore 88 * 88 * 89 * If binary semaphore, it only takes syncobj 89 * If binary semaphore, it only takes syncobj handle and ignores flags and 90 * point fields. Point is defined for timeline 90 * point fields. Point is defined for timeline syncobj feature. 91 */ 91 */ 92 struct drm_v3d_sem { 92 struct drm_v3d_sem { 93 __u32 handle; /* syncobj */ 93 __u32 handle; /* syncobj */ 94 /* rsv below, for future uses */ 94 /* rsv below, for future uses */ 95 __u32 flags; 95 __u32 flags; 96 __u64 point; /* for timeline sem supp 96 __u64 point; /* for timeline sem support */ 97 __u64 mbz[2]; /* must be zero, rsv */ 97 __u64 mbz[2]; /* must be zero, rsv */ 98 }; 98 }; 99 99 100 /* Enum for each of the V3D queues. */ 100 /* Enum for each of the V3D queues. 
*/ 101 enum v3d_queue { 101 enum v3d_queue { 102 V3D_BIN, 102 V3D_BIN, 103 V3D_RENDER, 103 V3D_RENDER, 104 V3D_TFU, 104 V3D_TFU, 105 V3D_CSD, 105 V3D_CSD, 106 V3D_CACHE_CLEAN, 106 V3D_CACHE_CLEAN, 107 V3D_CPU, 107 V3D_CPU, 108 }; 108 }; 109 109 110 /** 110 /** 111 * struct drm_v3d_multi_sync - ioctl extension 111 * struct drm_v3d_multi_sync - ioctl extension to add support multiples 112 * syncobjs for commands submission. 112 * syncobjs for commands submission. 113 * 113 * 114 * When an extension of DRM_V3D_EXT_ID_MULTI_S 114 * When an extension of DRM_V3D_EXT_ID_MULTI_SYNC id is defined, it points to 115 * this extension to define wait and signal de 115 * this extension to define wait and signal dependencies, instead of single 116 * in/out sync entries on submitting commands. 116 * in/out sync entries on submitting commands. The field flags is used to 117 * determine the stage to set wait dependencie 117 * determine the stage to set wait dependencies. 118 */ 118 */ 119 struct drm_v3d_multi_sync { 119 struct drm_v3d_multi_sync { 120 struct drm_v3d_extension base; 120 struct drm_v3d_extension base; 121 /* Array of wait and signal semaphores 121 /* Array of wait and signal semaphores */ 122 __u64 in_syncs; 122 __u64 in_syncs; 123 __u64 out_syncs; 123 __u64 out_syncs; 124 124 125 /* Number of entries */ 125 /* Number of entries */ 126 __u32 in_sync_count; 126 __u32 in_sync_count; 127 __u32 out_sync_count; 127 __u32 out_sync_count; 128 128 129 /* set the stage (v3d_queue) to sync * 129 /* set the stage (v3d_queue) to sync */ 130 __u32 wait_stage; 130 __u32 wait_stage; 131 131 132 __u32 pad; /* mbz */ 132 __u32 pad; /* mbz */ 133 }; 133 }; 134 134 135 /** 135 /** 136 * struct drm_v3d_submit_cl - ioctl argument f 136 * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D 137 * engine. 137 * engine. 
138 * 138 * 139 * This asks the kernel to have the GPU execut 139 * This asks the kernel to have the GPU execute an optional binner 140 * command list, and a render command list. 140 * command list, and a render command list. 141 * 141 * 142 * The L1T, slice, L2C, L2T, and GCA caches wi 142 * The L1T, slice, L2C, L2T, and GCA caches will be flushed before 143 * each CL executes. The VCD cache should be 143 * each CL executes. The VCD cache should be flushed (if necessary) 144 * by the submitted CLs. The TLB writes are g 144 * by the submitted CLs. The TLB writes are guaranteed to have been 145 * flushed by the time the render done IRQ hap 145 * flushed by the time the render done IRQ happens, which is the 146 * trigger for out_sync. Any dirtying of cach 146 * trigger for out_sync. Any dirtying of cachelines by the job (only 147 * possible using TMU writes) must be flushed 147 * possible using TMU writes) must be flushed by the caller using the 148 * DRM_V3D_SUBMIT_CL_FLUSH_CACHE_FLAG flag. 148 * DRM_V3D_SUBMIT_CL_FLUSH_CACHE_FLAG flag. 149 */ 149 */ 150 struct drm_v3d_submit_cl { 150 struct drm_v3d_submit_cl { 151 /* Pointer to the binner command list. 151 /* Pointer to the binner command list. 152 * 152 * 153 * This is the first set of commands e 153 * This is the first set of commands executed, which runs the 154 * coordinate shader to determine wher 154 * coordinate shader to determine where primitives land on the screen, 155 * then writes out the state updates a 155 * then writes out the state updates and draw calls necessary per tile 156 * to the tile allocation BO. 156 * to the tile allocation BO. 157 * 157 * 158 * This BCL will block on any previous 158 * This BCL will block on any previous BCL submitted on the 159 * same FD, but not on any RCL or BCLs 159 * same FD, but not on any RCL or BCLs submitted by other 160 * clients -- that is left up to the s 160 * clients -- that is left up to the submitter to control 161 * using in_sync_bcl if necessary. 
161 * using in_sync_bcl if necessary. 162 */ 162 */ 163 __u32 bcl_start; 163 __u32 bcl_start; 164 164 165 /** End address of the BCL (first byte 165 /** End address of the BCL (first byte after the BCL) */ 166 __u32 bcl_end; 166 __u32 bcl_end; 167 167 168 /* Offset of the render command list. 168 /* Offset of the render command list. 169 * 169 * 170 * This is the second set of commands 170 * This is the second set of commands executed, which will either 171 * execute the tiles that have been se 171 * execute the tiles that have been set up by the BCL, or a fixed set 172 * of tiles (in the case of RCL-only b 172 * of tiles (in the case of RCL-only blits). 173 * 173 * 174 * This RCL will block on this submit' 174 * This RCL will block on this submit's BCL, and any previous 175 * RCL submitted on the same FD, but n 175 * RCL submitted on the same FD, but not on any RCL or BCLs 176 * submitted by other clients -- that 176 * submitted by other clients -- that is left up to the 177 * submitter to control using in_sync_ 177 * submitter to control using in_sync_rcl if necessary. 178 */ 178 */ 179 __u32 rcl_start; 179 __u32 rcl_start; 180 180 181 /** End address of the RCL (first byte 181 /** End address of the RCL (first byte after the RCL) */ 182 __u32 rcl_end; 182 __u32 rcl_end; 183 183 184 /** An optional sync object to wait on 184 /** An optional sync object to wait on before starting the BCL. */ 185 __u32 in_sync_bcl; 185 __u32 in_sync_bcl; 186 /** An optional sync object to wait on 186 /** An optional sync object to wait on before starting the RCL. */ 187 __u32 in_sync_rcl; 187 __u32 in_sync_rcl; 188 /** An optional sync object to place t 188 /** An optional sync object to place the completion fence in. 
*/ 189 __u32 out_sync; 189 __u32 out_sync; 190 190 191 /* Offset of the tile alloc memory 191 /* Offset of the tile alloc memory 192 * 192 * 193 * This is optional on V3D 3.3 (where 193 * This is optional on V3D 3.3 (where the CL can set the value) but 194 * required on V3D 4.1. 194 * required on V3D 4.1. 195 */ 195 */ 196 __u32 qma; 196 __u32 qma; 197 197 198 /** Size of the tile alloc memory. */ 198 /** Size of the tile alloc memory. */ 199 __u32 qms; 199 __u32 qms; 200 200 201 /** Offset of the tile state data arra 201 /** Offset of the tile state data array. */ 202 __u32 qts; 202 __u32 qts; 203 203 204 /* Pointer to a u32 array of the BOs t 204 /* Pointer to a u32 array of the BOs that are referenced by the job. 205 */ 205 */ 206 __u64 bo_handles; 206 __u64 bo_handles; 207 207 208 /* Number of BO handles passed in (siz 208 /* Number of BO handles passed in (size is that times 4). */ 209 __u32 bo_handle_count; 209 __u32 bo_handle_count; 210 210 211 /* DRM_V3D_SUBMIT_* properties */ 211 /* DRM_V3D_SUBMIT_* properties */ 212 __u32 flags; 212 __u32 flags; 213 213 214 /* ID of the perfmon to attach to this 214 /* ID of the perfmon to attach to this job. 0 means no perfmon. */ 215 __u32 perfmon_id; 215 __u32 perfmon_id; 216 216 217 __u32 pad; 217 __u32 pad; 218 218 219 /* Pointer to an array of ioctl extens 219 /* Pointer to an array of ioctl extensions*/ 220 __u64 extensions; 220 __u64 extensions; 221 }; 221 }; 222 222 223 /** 223 /** 224 * struct drm_v3d_wait_bo - ioctl argument for 224 * struct drm_v3d_wait_bo - ioctl argument for waiting for 225 * completion of the last DRM_V3D_SUBMIT_CL on 225 * completion of the last DRM_V3D_SUBMIT_CL on a BO. 226 * 226 * 227 * This is useful for cases where multiple pro 227 * This is useful for cases where multiple processes might be 228 * rendering to a BO and you want to wait for 228 * rendering to a BO and you want to wait for all rendering to be 229 * completed. 229 * completed. 
230 */ 230 */ 231 struct drm_v3d_wait_bo { 231 struct drm_v3d_wait_bo { 232 __u32 handle; 232 __u32 handle; 233 __u32 pad; 233 __u32 pad; 234 __u64 timeout_ns; 234 __u64 timeout_ns; 235 }; 235 }; 236 236 237 /** 237 /** 238 * struct drm_v3d_create_bo - ioctl argument f 238 * struct drm_v3d_create_bo - ioctl argument for creating V3D BOs. 239 * 239 * 240 * There are currently no values for the flags 240 * There are currently no values for the flags argument, but it may be 241 * used in a future extension. 241 * used in a future extension. 242 */ 242 */ 243 struct drm_v3d_create_bo { 243 struct drm_v3d_create_bo { 244 __u32 size; 244 __u32 size; 245 __u32 flags; 245 __u32 flags; 246 /** Returned GEM handle for the BO. */ 246 /** Returned GEM handle for the BO. */ 247 __u32 handle; 247 __u32 handle; 248 /** 248 /** 249 * Returned offset for the BO in the V 249 * Returned offset for the BO in the V3D address space. This offset 250 * is private to the DRM fd and is val 250 * is private to the DRM fd and is valid for the lifetime of the GEM 251 * handle. 251 * handle. 252 * 252 * 253 * This offset value will always be no 253 * This offset value will always be nonzero, since various HW 254 * units treat 0 specially. 254 * units treat 0 specially. 255 */ 255 */ 256 __u32 offset; 256 __u32 offset; 257 }; 257 }; 258 258 259 /** 259 /** 260 * struct drm_v3d_mmap_bo - ioctl argument for 260 * struct drm_v3d_mmap_bo - ioctl argument for mapping V3D BOs. 261 * 261 * 262 * This doesn't actually perform an mmap. Ins 262 * This doesn't actually perform an mmap. Instead, it returns the 263 * offset you need to use in an mmap on the DR 263 * offset you need to use in an mmap on the DRM device node. This 264 * means that tools like valgrind end up knowi 264 * means that tools like valgrind end up knowing about the mapped 265 * memory. 265 * memory. 
266 * 266 * 267 * There are currently no values for the flags 267 * There are currently no values for the flags argument, but it may be 268 * used in a future extension. 268 * used in a future extension. 269 */ 269 */ 270 struct drm_v3d_mmap_bo { 270 struct drm_v3d_mmap_bo { 271 /** Handle for the object being mapped 271 /** Handle for the object being mapped. */ 272 __u32 handle; 272 __u32 handle; 273 __u32 flags; 273 __u32 flags; 274 /** offset into the drm node to use fo 274 /** offset into the drm node to use for subsequent mmap call. */ 275 __u64 offset; 275 __u64 offset; 276 }; 276 }; 277 277 278 enum drm_v3d_param { 278 enum drm_v3d_param { 279 DRM_V3D_PARAM_V3D_UIFCFG, 279 DRM_V3D_PARAM_V3D_UIFCFG, 280 DRM_V3D_PARAM_V3D_HUB_IDENT1, 280 DRM_V3D_PARAM_V3D_HUB_IDENT1, 281 DRM_V3D_PARAM_V3D_HUB_IDENT2, 281 DRM_V3D_PARAM_V3D_HUB_IDENT2, 282 DRM_V3D_PARAM_V3D_HUB_IDENT3, 282 DRM_V3D_PARAM_V3D_HUB_IDENT3, 283 DRM_V3D_PARAM_V3D_CORE0_IDENT0, 283 DRM_V3D_PARAM_V3D_CORE0_IDENT0, 284 DRM_V3D_PARAM_V3D_CORE0_IDENT1, 284 DRM_V3D_PARAM_V3D_CORE0_IDENT1, 285 DRM_V3D_PARAM_V3D_CORE0_IDENT2, 285 DRM_V3D_PARAM_V3D_CORE0_IDENT2, 286 DRM_V3D_PARAM_SUPPORTS_TFU, 286 DRM_V3D_PARAM_SUPPORTS_TFU, 287 DRM_V3D_PARAM_SUPPORTS_CSD, 287 DRM_V3D_PARAM_SUPPORTS_CSD, 288 DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH, 288 DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH, 289 DRM_V3D_PARAM_SUPPORTS_PERFMON, 289 DRM_V3D_PARAM_SUPPORTS_PERFMON, 290 DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT, 290 DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT, 291 DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE, 291 DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE, 292 DRM_V3D_PARAM_MAX_PERF_COUNTERS, 292 DRM_V3D_PARAM_MAX_PERF_COUNTERS, 293 }; 293 }; 294 294 295 struct drm_v3d_get_param { 295 struct drm_v3d_get_param { 296 __u32 param; 296 __u32 param; 297 __u32 pad; 297 __u32 pad; 298 __u64 value; 298 __u64 value; 299 }; 299 }; 300 300 301 /** 301 /** 302 * Returns the offset for the BO in the V3D ad 302 * Returns the offset for the BO in the V3D address space for this DRM fd. 
303 * This is the same value returned by drm_v3d_ 303 * This is the same value returned by drm_v3d_create_bo, if that was called 304 * from this DRM fd. 304 * from this DRM fd. 305 */ 305 */ 306 struct drm_v3d_get_bo_offset { 306 struct drm_v3d_get_bo_offset { 307 __u32 handle; 307 __u32 handle; 308 __u32 offset; 308 __u32 offset; 309 }; 309 }; 310 310 311 struct drm_v3d_submit_tfu { 311 struct drm_v3d_submit_tfu { 312 __u32 icfg; 312 __u32 icfg; 313 __u32 iia; 313 __u32 iia; 314 __u32 iis; 314 __u32 iis; 315 __u32 ica; 315 __u32 ica; 316 __u32 iua; 316 __u32 iua; 317 __u32 ioa; 317 __u32 ioa; 318 __u32 ios; 318 __u32 ios; 319 __u32 coef[4]; 319 __u32 coef[4]; 320 /* First handle is the output BO, foll 320 /* First handle is the output BO, following are other inputs. 321 * 0 for unused. 321 * 0 for unused. 322 */ 322 */ 323 __u32 bo_handles[4]; 323 __u32 bo_handles[4]; 324 /* sync object to block on before runn 324 /* sync object to block on before running the TFU job. Each TFU 325 * job will execute in the order submi 325 * job will execute in the order submitted to its FD. Synchronization 326 * against rendering jobs requires usi 326 * against rendering jobs requires using sync objects. 327 */ 327 */ 328 __u32 in_sync; 328 __u32 in_sync; 329 /* Sync object to signal when the TFU 329 /* Sync object to signal when the TFU job is done. */ 330 __u32 out_sync; 330 __u32 out_sync; 331 331 332 __u32 flags; 332 __u32 flags; 333 333 334 /* Pointer to an array of ioctl extens 334 /* Pointer to an array of ioctl extensions*/ 335 __u64 extensions; 335 __u64 extensions; 336 336 337 struct { 337 struct { 338 __u32 ioc; 338 __u32 ioc; 339 __u32 pad; 339 __u32 pad; 340 } v71; 340 } v71; 341 }; 341 }; 342 342 343 /* Submits a compute shader for dispatch. Thi 343 /* Submits a compute shader for dispatch. 
This job will block on any 344 * previous compute shaders submitted on this 344 * previous compute shaders submitted on this fd, and any other 345 * synchronization must be performed with in_s 345 * synchronization must be performed with in_sync/out_sync. 346 */ 346 */ 347 struct drm_v3d_submit_csd { 347 struct drm_v3d_submit_csd { 348 __u32 cfg[7]; 348 __u32 cfg[7]; 349 __u32 coef[4]; 349 __u32 coef[4]; 350 350 351 /* Pointer to a u32 array of the BOs t 351 /* Pointer to a u32 array of the BOs that are referenced by the job. 352 */ 352 */ 353 __u64 bo_handles; 353 __u64 bo_handles; 354 354 355 /* Number of BO handles passed in (siz 355 /* Number of BO handles passed in (size is that times 4). */ 356 __u32 bo_handle_count; 356 __u32 bo_handle_count; 357 357 358 /* sync object to block on before runn 358 /* sync object to block on before running the CSD job. Each 359 * CSD job will execute in the order s 359 * CSD job will execute in the order submitted to its FD. 360 * Synchronization against rendering/T 360 * Synchronization against rendering/TFU jobs or CSD from 361 * other fds requires using sync objec 361 * other fds requires using sync objects. 362 */ 362 */ 363 __u32 in_sync; 363 __u32 in_sync; 364 /* Sync object to signal when the CSD 364 /* Sync object to signal when the CSD job is done. */ 365 __u32 out_sync; 365 __u32 out_sync; 366 366 367 /* ID of the perfmon to attach to this 367 /* ID of the perfmon to attach to this job. 0 means no perfmon. 
*/ 368 __u32 perfmon_id; 368 __u32 perfmon_id; 369 369 370 /* Pointer to an array of ioctl extens 370 /* Pointer to an array of ioctl extensions*/ 371 __u64 extensions; 371 __u64 extensions; 372 372 373 __u32 flags; 373 __u32 flags; 374 374 375 __u32 pad; 375 __u32 pad; 376 }; 376 }; 377 377 378 /** 378 /** 379 * struct drm_v3d_indirect_csd - ioctl extensi 379 * struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an 380 * indirect CSD 380 * indirect CSD 381 * 381 * 382 * When an extension of DRM_V3D_EXT_ID_CPU_IND 382 * When an extension of DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined, it 383 * points to this extension to define a indire 383 * points to this extension to define a indirect CSD submission. It creates a 384 * CPU job linked to a CSD job. The CPU job wa 384 * CPU job linked to a CSD job. The CPU job waits for the indirect CSD 385 * dependencies and, once they are signaled, i 385 * dependencies and, once they are signaled, it updates the CSD job config 386 * before allowing the CSD job execution. 386 * before allowing the CSD job execution. 387 */ 387 */ 388 struct drm_v3d_indirect_csd { 388 struct drm_v3d_indirect_csd { 389 struct drm_v3d_extension base; 389 struct drm_v3d_extension base; 390 390 391 /* Indirect CSD */ 391 /* Indirect CSD */ 392 struct drm_v3d_submit_csd submit; 392 struct drm_v3d_submit_csd submit; 393 393 394 /* Handle of the indirect BO, that sho 394 /* Handle of the indirect BO, that should be also attached to the 395 * indirect CSD. 395 * indirect CSD. 396 */ 396 */ 397 __u32 indirect; 397 __u32 indirect; 398 398 399 /* Offset within the BO where the work 399 /* Offset within the BO where the workgroup counts are stored */ 400 __u32 offset; 400 __u32 offset; 401 401 402 /* Workgroups size */ 402 /* Workgroups size */ 403 __u32 wg_size; 403 __u32 wg_size; 404 404 405 /* Indices of the uniforms with the wo 405 /* Indices of the uniforms with the workgroup dispatch counts 406 * in the uniform stream. 
If the unifo 406 * in the uniform stream. If the uniform rewrite is not needed, 407 * the offset must be 0xffffffff. 407 * the offset must be 0xffffffff. 408 */ 408 */ 409 __u32 wg_uniform_offsets[3]; 409 __u32 wg_uniform_offsets[3]; 410 }; 410 }; 411 411 412 /** 412 /** 413 * struct drm_v3d_timestamp_query - ioctl exte 413 * struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate 414 * a timestamp query 414 * a timestamp query 415 * 415 * 416 * When an extension DRM_V3D_EXT_ID_TIMESTAMP_ 416 * When an extension DRM_V3D_EXT_ID_TIMESTAMP_QUERY is defined, it points to 417 * this extension to define a timestamp query 417 * this extension to define a timestamp query submission. This CPU job will 418 * calculate the timestamp query and update th 418 * calculate the timestamp query and update the query value within the 419 * timestamp BO. Moreover, it will signal the 419 * timestamp BO. Moreover, it will signal the timestamp syncobj to indicate 420 * query availability. 420 * query availability. 
421 */ 421 */ 422 struct drm_v3d_timestamp_query { 422 struct drm_v3d_timestamp_query { 423 struct drm_v3d_extension base; 423 struct drm_v3d_extension base; 424 424 425 /* Array of queries' offsets within th 425 /* Array of queries' offsets within the timestamp BO for their value */ 426 __u64 offsets; 426 __u64 offsets; 427 427 428 /* Array of timestamp's syncobjs to in 428 /* Array of timestamp's syncobjs to indicate its availability */ 429 __u64 syncs; 429 __u64 syncs; 430 430 431 /* Number of queries */ 431 /* Number of queries */ 432 __u32 count; 432 __u32 count; 433 433 434 /* mbz */ 434 /* mbz */ 435 __u32 pad; 435 __u32 pad; 436 }; 436 }; 437 437 438 /** 438 /** 439 * struct drm_v3d_reset_timestamp_query - ioct 439 * struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to 440 * reset timestamp queries 440 * reset timestamp queries 441 * 441 * 442 * When an extension DRM_V3D_EXT_ID_CPU_RESET_ 442 * When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it 443 * points to this extension to define a reset 443 * points to this extension to define a reset timestamp submission. This CPU 444 * job will reset the timestamp queries based 444 * job will reset the timestamp queries based on value offset of the first 445 * query. Moreover, it will reset the timestam 445 * query. Moreover, it will reset the timestamp syncobj to reset query 446 * availability. 446 * availability. 
447 */ 447 */ 448 struct drm_v3d_reset_timestamp_query { 448 struct drm_v3d_reset_timestamp_query { 449 struct drm_v3d_extension base; 449 struct drm_v3d_extension base; 450 450 451 /* Array of timestamp's syncobjs to in 451 /* Array of timestamp's syncobjs to indicate its availability */ 452 __u64 syncs; 452 __u64 syncs; 453 453 454 /* Offset of the first query within th 454 /* Offset of the first query within the timestamp BO for its value */ 455 __u32 offset; 455 __u32 offset; 456 456 457 /* Number of queries */ 457 /* Number of queries */ 458 __u32 count; 458 __u32 count; 459 }; 459 }; 460 460 461 /** 461 /** 462 * struct drm_v3d_copy_timestamp_query - ioctl 462 * struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy 463 * query results to a buffer 463 * query results to a buffer 464 * 464 * 465 * When an extension DRM_V3D_EXT_ID_CPU_COPY_T 465 * When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it 466 * points to this extension to define a copy t 466 * points to this extension to define a copy timestamp query submission. This 467 * CPU job will copy the timestamp queries res 467 * CPU job will copy the timestamp queries results to a BO with the offset 468 * and stride defined in the extension. 468 * and stride defined in the extension. 
469 */ 469 */ 470 struct drm_v3d_copy_timestamp_query { 470 struct drm_v3d_copy_timestamp_query { 471 struct drm_v3d_extension base; 471 struct drm_v3d_extension base; 472 472 473 /* Define if should write to buffer us 473 /* Define if should write to buffer using 64 or 32 bits */ 474 __u8 do_64bit; 474 __u8 do_64bit; 475 475 476 /* Define if it can write to buffer ev 476 /* Define if it can write to buffer even if the query is not available */ 477 __u8 do_partial; 477 __u8 do_partial; 478 478 479 /* Define if it should write availabil 479 /* Define if it should write availability bit to buffer */ 480 __u8 availability_bit; 480 __u8 availability_bit; 481 481 482 /* mbz */ 482 /* mbz */ 483 __u8 pad; 483 __u8 pad; 484 484 485 /* Offset of the buffer in the BO */ 485 /* Offset of the buffer in the BO */ 486 __u32 offset; 486 __u32 offset; 487 487 488 /* Stride of the buffer in the BO */ 488 /* Stride of the buffer in the BO */ 489 __u32 stride; 489 __u32 stride; 490 490 491 /* Number of queries */ 491 /* Number of queries */ 492 __u32 count; 492 __u32 count; 493 493 494 /* Array of queries' offsets within th 494 /* Array of queries' offsets within the timestamp BO for their value */ 495 __u64 offsets; 495 __u64 offsets; 496 496 497 /* Array of timestamp's syncobjs to in 497 /* Array of timestamp's syncobjs to indicate its availability */ 498 __u64 syncs; 498 __u64 syncs; 499 }; 499 }; 500 500 501 /** 501 /** 502 * struct drm_v3d_reset_performance_query - io 502 * struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to 503 * reset performance queries 503 * reset performance queries 504 * 504 * 505 * When an extension DRM_V3D_EXT_ID_CPU_RESET_ 505 * When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it 506 * points to this extension to define a reset 506 * points to this extension to define a reset performance submission. 
This CPU
 * job will reset the performance queries by resetting the values of the
 * performance monitors. Moreover, it will reset the syncobj to reset query
 * availability.
 */
struct drm_v3d_reset_performance_query {
	struct drm_v3d_extension base;

	/* Array of performance queries' syncobjs to indicate their availability */
	__u64 syncs;

	/* Number of queries */
	__u32 count;

	/* Number of performance monitors */
	__u32 nperfmons;

	/* Array of u64 user-pointers that point to an array of kperfmon_ids */
	__u64 kperfmon_ids;
};

/**
 * struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy
 * performance query results to a buffer
 *
 * When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it
 * points to this extension to define a copy performance query submission. This
 * CPU job will copy the performance queries results to a BO with the offset
 * and stride defined in the extension.
 */
struct drm_v3d_copy_performance_query {
	struct drm_v3d_extension base;

	/* Define if should write to buffer using 64 or 32 bits */
	__u8 do_64bit;

	/* Define if it can write to buffer even if the query is not available */
	__u8 do_partial;

	/* Define if it should write availability bit to buffer */
	__u8 availability_bit;

	/* mbz (must be zero) */
	__u8 pad;

	/* Offset of the buffer in the BO */
	__u32 offset;

	/* Stride of the buffer in the BO */
	__u32 stride;

	/* Number of performance monitors */
	__u32 nperfmons;

	/* Number of performance counters related to this query pool */
	__u32 ncounters;

	/* Number of queries */
	__u32 count;

	/* Array of performance queries' syncobjs to indicate their availability */
	__u64 syncs;

	/* Array of u64 user-pointers that point to an array of kperfmon_ids */
	__u64 kperfmon_ids;
};

struct drm_v3d_submit_cpu {
	/* Pointer to a u32 array of the BOs that are referenced by the job.
	 *
	 * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO,
	 * that contains the workgroup counts.
	 *
	 * For DRM_V3D_EXT_ID_TIMESTAMP_QUERY, it must contain only one BO,
	 * that will contain the timestamp.
	 *
	 * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only
	 * one BO, that contains the timestamp.
	 *
	 * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two
	 * BOs. The first is the BO where the timestamp queries will be written
	 * to. The second is the BO that contains the timestamp.
	 *
	 * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no
	 * BOs.
	 *
	 * For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one
	 * BO, where the performance queries will be written.
	 */
	__u64 bo_handles;

	/* Number of BO handles passed in (size is that times 4). */
	__u32 bo_handle_count;

	/* NOTE(review): no DRM_V3D_SUBMIT_CPU flag bits are visible in this
	 * chunk — presumably mbz; confirm against the full header.
	 */
	__u32 flags;

	/* Pointer to an array of ioctl extensions */
	__u64 extensions;
};

/* The performance counter indices represented by this enum are deprecated and
 * must no longer be used.
These counters are only valid for V3D 4.2.
 *
 * In order to check for performance counter information,
 * use DRM_IOCTL_V3D_PERFMON_GET_COUNTER.
 *
 * Don't use V3D_PERFCNT_NUM to retrieve the maximum number of performance
 * counters. You should use DRM_IOCTL_V3D_GET_PARAM with the following
 * parameter: DRM_V3D_PARAM_MAX_PERF_COUNTERS.
 */
enum {
	V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
	V3D_PERFCNT_FEP_VALID_PRIMS,
	V3D_PERFCNT_FEP_EZ_NFCLIP_QUADS,
	V3D_PERFCNT_FEP_VALID_QUADS,
	V3D_PERFCNT_TLB_QUADS_STENCIL_FAIL,
	V3D_PERFCNT_TLB_QUADS_STENCILZ_FAIL,
	V3D_PERFCNT_TLB_QUADS_STENCILZ_PASS,
	V3D_PERFCNT_TLB_QUADS_ZERO_COV,
	V3D_PERFCNT_TLB_QUADS_NONZERO_COV,
	V3D_PERFCNT_TLB_QUADS_WRITTEN,
	V3D_PERFCNT_PTB_PRIM_VIEWPOINT_DISCARD,
	V3D_PERFCNT_PTB_PRIM_CLIP,
	V3D_PERFCNT_PTB_PRIM_REV,
	V3D_PERFCNT_QPU_IDLE_CYCLES,
	V3D_PERFCNT_QPU_ACTIVE_CYCLES_VERTEX_COORD_USER,
	V3D_PERFCNT_QPU_ACTIVE_CYCLES_FRAG,
	V3D_PERFCNT_QPU_CYCLES_VALID_INSTR,
	V3D_PERFCNT_QPU_CYCLES_TMU_STALL,
	V3D_PERFCNT_QPU_CYCLES_SCOREBOARD_STALL,
	V3D_PERFCNT_QPU_CYCLES_VARYINGS_STALL,
	V3D_PERFCNT_QPU_IC_HIT,
	V3D_PERFCNT_QPU_IC_MISS,
	V3D_PERFCNT_QPU_UC_HIT,
	V3D_PERFCNT_QPU_UC_MISS,
	V3D_PERFCNT_TMU_TCACHE_ACCESS,
	V3D_PERFCNT_TMU_TCACHE_MISS,
	V3D_PERFCNT_VPM_VDW_STALL,
	V3D_PERFCNT_VPM_VCD_STALL,
	V3D_PERFCNT_BIN_ACTIVE,
	V3D_PERFCNT_RDR_ACTIVE,
	V3D_PERFCNT_L2T_HITS,
	V3D_PERFCNT_L2T_MISSES,
	V3D_PERFCNT_CYCLE_COUNT,
	V3D_PERFCNT_QPU_CYCLES_STALLED_VERTEX_COORD_USER,
	V3D_PERFCNT_QPU_CYCLES_STALLED_FRAGMENT,
	V3D_PERFCNT_PTB_PRIMS_BINNED,
	V3D_PERFCNT_AXI_WRITES_WATCH_0,
	V3D_PERFCNT_AXI_READS_WATCH_0,
	V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_0,
	V3D_PERFCNT_AXI_READ_STALLS_WATCH_0,
	V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_0,
	V3D_PERFCNT_AXI_READ_BYTES_WATCH_0,
	V3D_PERFCNT_AXI_WRITES_WATCH_1,
	V3D_PERFCNT_AXI_READS_WATCH_1,
	V3D_PERFCNT_AXI_WRITE_STALLS_WATCH_1,
	V3D_PERFCNT_AXI_READ_STALLS_WATCH_1,
	V3D_PERFCNT_AXI_WRITE_BYTES_WATCH_1,
	V3D_PERFCNT_AXI_READ_BYTES_WATCH_1,
	V3D_PERFCNT_TLB_PARTIAL_QUADS,
	V3D_PERFCNT_TMU_CONFIG_ACCESSES,
	V3D_PERFCNT_L2T_NO_ID_STALL,
	V3D_PERFCNT_L2T_COM_QUE_STALL,
	V3D_PERFCNT_L2T_TMU_WRITES,
	V3D_PERFCNT_TMU_ACTIVE_CYCLES,
	V3D_PERFCNT_TMU_STALLED_CYCLES,
	V3D_PERFCNT_CLE_ACTIVE,
	V3D_PERFCNT_L2T_TMU_READS,
	V3D_PERFCNT_L2T_CLE_READS,
	V3D_PERFCNT_L2T_VCD_READS,
	V3D_PERFCNT_L2T_TMUCFG_READS,
	V3D_PERFCNT_L2T_SLC0_READS,
	V3D_PERFCNT_L2T_SLC1_READS,
	V3D_PERFCNT_L2T_SLC2_READS,
	V3D_PERFCNT_L2T_TMU_W_MISSES,
	V3D_PERFCNT_L2T_TMU_R_MISSES,
	V3D_PERFCNT_L2T_CLE_MISSES,
	V3D_PERFCNT_L2T_VCD_MISSES,
	V3D_PERFCNT_L2T_TMUCFG_MISSES,
	V3D_PERFCNT_L2T_SLC0_MISSES,
	V3D_PERFCNT_L2T_SLC1_MISSES,
	V3D_PERFCNT_L2T_SLC2_MISSES,
	V3D_PERFCNT_CORE_MEM_WRITES,
	V3D_PERFCNT_L2T_MEM_WRITES,
	V3D_PERFCNT_PTB_MEM_WRITES,
	V3D_PERFCNT_TLB_MEM_WRITES,
	V3D_PERFCNT_CORE_MEM_READS,
	V3D_PERFCNT_L2T_MEM_READS,
	V3D_PERFCNT_PTB_MEM_READS,
	V3D_PERFCNT_PSE_MEM_READS,
	V3D_PERFCNT_TLB_MEM_READS,
	V3D_PERFCNT_GMP_MEM_READS,
	V3D_PERFCNT_PTB_W_MEM_WORDS,
	V3D_PERFCNT_TLB_W_MEM_WORDS,
	V3D_PERFCNT_PSE_R_MEM_WORDS,
	V3D_PERFCNT_TLB_R_MEM_WORDS,
	V3D_PERFCNT_TMU_MRU_HITS,
	V3D_PERFCNT_COMPUTE_ACTIVE,
	/* Not a counter — see the comment above: do not use as a maximum */
	V3D_PERFCNT_NUM,
};

#define DRM_V3D_MAX_PERF_COUNTERS	32

struct drm_v3d_perfmon_create {
	__u32 id;
	__u32 ncounters;
	__u8 counters[DRM_V3D_MAX_PERF_COUNTERS];
};

struct drm_v3d_perfmon_destroy {
	__u32 id;
};

/*
 * Returns the values of the performance counters tracked by this
 * perfmon (as an array of ncounters u64 values).
 *
 * No implicit synchronization is performed, so the user has to
 * guarantee that any jobs using this perfmon have already been
 * completed (probably by blocking on the seqno returned by the
 * last exec that used the perfmon).
 */
struct drm_v3d_perfmon_get_values {
	__u32 id;
	__u32 pad;
	/* User pointer to an array of ncounters u64 values (see comment above) */
	__u64 values_ptr;
};

#define DRM_V3D_PERFCNT_MAX_NAME 64
#define DRM_V3D_PERFCNT_MAX_CATEGORY 32
#define DRM_V3D_PERFCNT_MAX_DESCRIPTION 256

/**
 * struct drm_v3d_perfmon_get_counter - ioctl to get the description of a
 * performance counter
 *
 * As userspace needs to retrieve information about the performance counters
 * available, this IOCTL allows users to get information about a performance
 * counter (name, category and description).
 */
struct drm_v3d_perfmon_get_counter {
	/*
	 * Counter ID
	 *
	 * Must be smaller than the maximum number of performance counters, which
	 * can be retrieved through DRM_V3D_PARAM_MAX_PERF_COUNTERS.
	 */
	__u8 counter;

	/* Name of the counter */
	__u8 name[DRM_V3D_PERFCNT_MAX_NAME];

	/* Category of the counter */
	__u8 category[DRM_V3D_PERFCNT_MAX_CATEGORY];

	/* Description of the counter */
	__u8 description[DRM_V3D_PERFCNT_MAX_DESCRIPTION];

	/* mbz (must be zero) */
	__u8 reserved[7];
};

#if defined(__cplusplus)
}
#endif

#endif /* _V3D_DRM_H_ */
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.