/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef HISI_ACC_QM_H
#define HISI_ACC_QM_H

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>

/* Max queue number per function for QM hardware v1 / v2+ */
#define QM_QNUM_V1			4096
#define QM_QNUM_V2			1024
#define QM_MAX_VFS_NUM_V2		63

/* qm user domain */
#define QM_ARUSER_M_CFG_1		0x100088
#define AXUSER_SNOOP_ENABLE		BIT(30)
#define AXUSER_CMD_TYPE			GENMASK(14, 12)
#define AXUSER_CMD_SMMU_NORMAL		1
#define AXUSER_NS			BIT(6)
#define AXUSER_NO			BIT(5)
#define AXUSER_FP			BIT(4)
#define AXUSER_SSV			BIT(0)
#define AXUSER_BASE			(AXUSER_SNOOP_ENABLE |		\
					FIELD_PREP(AXUSER_CMD_TYPE,	\
					AXUSER_CMD_SMMU_NORMAL) |	\
					AXUSER_NS | AXUSER_NO | AXUSER_FP)
#define QM_ARUSER_M_CFG_ENABLE		0x100090
#define ARUSER_M_CFG_ENABLE		0xfffffffe
#define QM_AWUSER_M_CFG_1		0x100098
#define QM_AWUSER_M_CFG_ENABLE		0x1000a0
#define AWUSER_M_CFG_ENABLE		0xfffffffe
#define QM_WUSER_M_CFG_ENABLE		0x1000a8
#define WUSER_M_CFG_ENABLE		0xffffffff

/* mailbox: command opcodes written to the QM mailbox registers */
#define QM_MB_CMD_SQC			0x0
#define QM_MB_CMD_CQC			0x1
#define QM_MB_CMD_EQC			0x2
#define QM_MB_CMD_AEQC			0x3
#define QM_MB_CMD_SQC_BT		0x4
#define QM_MB_CMD_CQC_BT		0x5
#define QM_MB_CMD_SQC_VFT_V2		0x6
#define QM_MB_CMD_STOP_QP		0x8
#define QM_MB_CMD_FLUSH_QM		0x9
#define QM_MB_CMD_SRC			0xc
#define QM_MB_CMD_DST			0xd

/* mailbox register offsets and field shifts */
#define QM_MB_CMD_SEND_BASE		0x300
#define QM_MB_EVENT_SHIFT		8
#define QM_MB_BUSY_SHIFT		13
#define QM_MB_OP_SHIFT			14
#define QM_MB_CMD_DATA_ADDR_L		0x304
#define QM_MB_CMD_DATA_ADDR_H		0x308
#define QM_MB_MAX_WAIT_CNT		6000

/* doorbell command types */
#define QM_DOORBELL_CMD_SQ		0
#define QM_DOORBELL_CMD_CQ		1
#define QM_DOORBELL_CMD_EQ		2
#define QM_DOORBELL_CMD_AEQ		3

/* doorbell register bases and field shifts (hardware v2+) */
#define QM_DOORBELL_SQ_CQ_BASE_V2	0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2	0x2000
#define QM_QP_MAX_NUM_SHIFT		11
#define QM_DB_CMD_SHIFT_V2		12
#define QM_DB_RAND_SHIFT_V2		16
#define QM_DB_INDEX_SHIFT_V2		32
#define QM_DB_PRIORITY_SHIFT_V2		48
#define QM_VF_STATE			0x60

/* qm cache */
#define QM_CACHE_CTL			0x100050
#define SQC_CACHE_ENABLE		BIT(0)
#define CQC_CACHE_ENABLE		BIT(1)
#define SQC_CACHE_WB_ENABLE		BIT(4)
#define SQC_CACHE_WB_THRD		GENMASK(10, 5)
#define CQC_CACHE_WB_ENABLE		BIT(11)
#define CQC_CACHE_WB_THRD		GENMASK(17, 12)
#define QM_AXI_M_CFG			0x1000ac
#define AXI_M_CFG			0xffff
#define QM_AXI_M_CFG_ENABLE		0x1000b0
#define AM_CFG_SINGLE_PORT_MAX_TRANS	0x300014
#define AXI_M_CFG_ENABLE		0xffffffff
#define QM_PEH_AXUSER_CFG		0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE	0x1000d0
#define PEH_AXUSER_CFG			0x401001
#define PEH_AXUSER_CFG_ENABLE		0xffffffff

#define QM_MIN_QNUM			2
#define HISI_ACC_SGL_SGE_NR_MAX		255
#define QM_SHAPER_CFG			0x100164
#define QM_SHAPER_ENABLE		BIT(30)
#define QM_SHAPER_TYPE1_OFFSET		10

/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR		1

/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE		0 /* don't use uacce */
#define UACCE_MODE_SVA			1 /* use uacce sva mode */
#define UACCE_MODE_DESC	"0(default) means only register to crypto, 1 means both register to crypto and uacce"

/* Reason passed to hisi_qm_stop() describing why the QM is stopping */
enum qm_stop_reason {
	QM_NORMAL,
	QM_SOFT_RESET,
	QM_DOWN,
};

/* Overall QM working state */
enum qm_state {
	QM_WORK = 0,
	QM_STOP,
};

/* Per-queue-pair state */
enum qp_state {
	QP_START = 1,
	QP_STOP,
};

/* Hardware revision IDs (PCI revision register values) */
enum qm_hw_ver {
	QM_HW_V1 = 0x20,
	QM_HW_V2 = 0x21,
	QM_HW_V3 = 0x30,
};

/* Whether this QM instance drives a physical or a virtual function */
enum qm_fun_type {
	QM_HW_PF,
	QM_HW_VF,
};

/* Indices of the per-QM debugfs files */
enum qm_debug_file {
	CURRENT_QM,
	CURRENT_Q,
	CLEAR_ENABLE,
	DEBUG_FILE_NUM,
};

/* VF readiness state stored at the QM_VF_STATE offset */
enum qm_vf_state {
	QM_READY = 0,
	QM_NOT_READY,
};

/* Bit positions in hisi_qm::misc_ctl */
enum qm_misc_ctl_bits {
	QM_DRIVER_REMOVING = 0x0,
	QM_RST_SCHED,
	QM_RESETTING,
	QM_MODULE_PARAM,
};

/* Bit positions in hisi_qm::caps describing hardware capabilities */
enum qm_cap_bits {
	QM_SUPPORT_DB_ISOLATION = 0x0,
	QM_SUPPORT_FUNC_QOS,
	QM_SUPPORT_STOP_QP,
	QM_SUPPORT_STOP_FUNC,
	QM_SUPPORT_MB_COMMAND,
	QM_SUPPORT_SVA_PREFETCH,
	QM_SUPPORT_RPM,
};

/* Maps an algorithm-capability mask to the algorithm's name string */
struct qm_dev_alg {
	u64 alg_msk;
	const char *alg;
};

/* User-configurable device debug settings exposed through debugfs */
struct qm_dev_dfx {
	u32 dev_state;
	u32 dev_timeout;
};

/* Describes a set of registers whose values are diffed for debugfs dumps */
struct dfx_diff_registers {
	u32 *regs;
	u32 reg_offset;
	u32 reg_len;
};

/* Debug counters for QM interrupts and mailbox/queue-pair failures */
struct qm_dfx {
	atomic64_t err_irq_cnt;
	atomic64_t aeq_irq_cnt;
	atomic64_t abnormal_irq_cnt;
	atomic64_t create_qp_err_cnt;
	atomic64_t mb_err_cnt;
};

/* One debugfs file instance; @lock serializes access to it */
struct debugfs_file {
	enum qm_debug_file index;
	struct mutex lock;
	struct qm_debug *debug;
};

/* Per-QM debugfs state and snapshots of "last" register values */
struct qm_debug {
	u32 curr_qm_qp_num;
	u32 sqe_mask_offset;
	u32 sqe_mask_len;
	struct qm_dfx dfx;
	struct dentry *debug_root;
	struct dentry *qm_d;
	struct debugfs_file files[DEBUG_FILE_NUM];
	struct qm_dev_dfx dev_dfx;
	unsigned int *qm_last_words;
	/* ACC engines recording last regs */
	unsigned int *last_words;
	struct dfx_diff_registers *qm_diff_regs;
	struct dfx_diff_registers *acc_diff_regs;
};

/* QoS shaper factors for one function (used with QM_SHAPER_* registers) */
struct qm_shaper_factor {
	u32 func_qos;
	u64 cir_b;
	u64 cir_u;
	u64 cir_s;
	u64 cbs_s;
};

/* A DMA-coherent buffer: kernel virtual address, bus address and size */
struct qm_dma {
	void *va;
	dma_addr_t dma;
	size_t size;
};

/* Runtime status of the QM's event and asynchronous-event queues */
struct hisi_qm_status {
	u32 eq_head;
	bool eqc_phase;
	u32 aeq_head;
	bool aeqc_phase;
	atomic_t flags;
	int stop_reason;
};

struct hisi_qm;

/* Device-specific error masks and reset configuration */
struct hisi_qm_err_info {
	char *acpi_rst;
	u32 msi_wr_port;
	u32 ecc_2bits_mask;
	u32 qm_shutdown_mask;
	u32 dev_shutdown_mask;
	u32 qm_reset_mask;
	u32 dev_reset_mask;
	u32 ce;
	u32 nfe;
	u32 fe;
};

/* Latched multi-bit ECC error indicators for QM and device */
struct hisi_qm_err_status {
	u32 is_qm_ecc_mbit;
	u32 is_dev_ecc_mbit;
};

/* Callbacks each accelerator driver provides for hardware error handling */
struct hisi_qm_err_ini {
	int (*hw_init)(struct hisi_qm *qm);
	void (*hw_err_enable)(struct hisi_qm *qm);
	void (*hw_err_disable)(struct hisi_qm *qm);
	u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
	void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
	void (*open_axi_master_ooo)(struct hisi_qm *qm);
	void (*close_axi_master_ooo)(struct hisi_qm *qm);
	void (*open_sva_prefetch)(struct hisi_qm *qm);
	void (*close_sva_prefetch)(struct hisi_qm *qm);
	void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
	void (*show_last_dfx_regs)(struct hisi_qm *qm);
	void (*err_info_init)(struct hisi_qm *qm);
};

/* One row of a capability table: where to read it and per-revision defaults */
struct hisi_qm_cap_info {
	u32 type;
	/* Register offset */
	u32 offset;
	/* Bit offset in register */
	u32 shift;
	u32 mask;
	u32 v1_val;
	u32 v2_val;
	u32 v3_val;
};

/* A resolved capability value keyed by capability type */
struct hisi_qm_cap_record {
	u32 type;
	u32 cap_val;
};

/* Resolved capability tables for the QM itself and for the device */
struct hisi_qm_cap_tables {
	struct hisi_qm_cap_record *qm_cap_table;
	struct hisi_qm_cap_record *dev_cap_table;
};

/* List of all QM instances of one accelerator type; @lock protects @list */
struct hisi_qm_list {
	struct mutex lock;
	struct list_head list;
	int (*register_to_crypto)(struct hisi_qm *qm);
	void (*unregister_from_crypto)(struct hisi_qm *qm);
};

/* Per-worker context for polling completed queue pairs from the EQ */
struct hisi_qm_poll_data {
	struct hisi_qm *qm;
	struct work_struct work;
	u16 *qp_finish_id;
	u16 eqe_num;
};

/**
 * struct qm_err_isolate
 * @isolate_lock: protects device error log
 * @err_threshold: user config error threshold which triggers isolation
 * @is_isolate: device isolation state
 * @qm_hw_errs: list of recorded device hardware errors
 */
struct qm_err_isolate {
	struct mutex isolate_lock;
	u32 err_threshold;
	bool is_isolate;
	struct list_head qm_hw_errs;
};

/* Reserved XQC (SQC/CQC/EQC/AEQC) buffers backed by one DMA region @qcdma */
struct qm_rsv_buf {
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	struct qm_eqc *eqc;
	struct qm_aeqc *aeqc;
	dma_addr_t sqc_dma;
	dma_addr_t cqc_dma;
	dma_addr_t eqc_dma;
	dma_addr_t aeqc_dma;
	struct qm_dma qcdma;
};
/* Central state for one QM (queue management) function, PF or VF */
struct hisi_qm {
	enum qm_hw_ver ver;
	enum qm_fun_type fun_type;
	const char *dev_name;
	struct pci_dev *pdev;
	void __iomem *io_base;
	void __iomem *db_io_base;

	/* Capability version, 0: not supported */
	u32 cap_ver;
	u32 sqe_size;
	u32 qp_base;
	u32 qp_num;
	u32 qp_in_used;
	u32 ctrl_qp_num;
	u32 max_qp_num;
	u32 vfs_num;
	u32 db_interval;
	u16 eq_depth;
	u16 aeq_depth;
	struct list_head list;
	struct hisi_qm_list *qm_list;

	/* DMA region and queue-context pointers for SQC/CQC/EQE/AEQE */
	struct qm_dma qdma;
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	struct qm_eqe *eqe;
	struct qm_aeqe *aeqe;
	dma_addr_t sqc_dma;
	dma_addr_t cqc_dma;
	dma_addr_t eqe_dma;
	dma_addr_t aeqe_dma;
	struct qm_rsv_buf xqc_buf;

	struct hisi_qm_status status;
	const struct hisi_qm_err_ini *err_ini;
	struct hisi_qm_err_info err_info;
	struct hisi_qm_err_status err_status;
	/* driver removing and reset sched */
	unsigned long misc_ctl;
	/* Device capability bit */
	unsigned long caps;

	/* qps_lock guards qp_idr/qp_array lifetime against start/stop/reset */
	struct rw_semaphore qps_lock;
	struct idr qp_idr;
	struct hisi_qp *qp_array;
	struct hisi_qm_poll_data *poll_data;

	/* serializes hardware mailbox transactions */
	struct mutex mailbox_lock;

	const struct hisi_qm_hw_ops *ops;

	struct qm_debug debug;

	u32 error_mask;

	struct workqueue_struct *wq;
	struct work_struct rst_work;
	struct work_struct cmd_process;

	bool use_sva;

	resource_size_t phys_base;
	resource_size_t db_phys_base;
	struct uacce_device *uacce;
	int mode;
	struct qm_shaper_factor *factor;
	u32 mb_qos;
	u32 type_rate;
	struct qm_err_isolate isolate_data;

	struct hisi_qm_cap_tables cap_tables;
};

/* Runtime status of one queue pair (SQ tail, CQ head, phase bit, flags) */
struct hisi_qp_status {
	atomic_t used;
	u16 sq_tail;
	u16 cq_head;
	bool cqc_phase;
	atomic_t flags;
};

/* Optional hook for filling a submission queue element */
struct hisi_qp_ops {
	int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};

/* One queue pair (submission + completion queue) owned by a hisi_qm */
struct hisi_qp {
	u32 qp_id;
	u16 sq_depth;
	u16 cq_depth;
	u8 alg_type;
	u8 req_type;

	struct qm_dma qdma;
	void *sqe;
	struct qm_cqe *cqe;
	dma_addr_t sqe_dma;
	dma_addr_t cqe_dma;

	struct hisi_qp_status qp_status;
	struct hisi_qp_ops *hw_ops;
	void *qp_ctx;
	/* completion callback invoked per finished request */
	void (*req_cb)(struct hisi_qp *qp, void *data);
	void (*event_cb)(struct hisi_qp *qp);

	struct hisi_qm *qm;
	bool is_resetting;
	bool is_in_kernel;
	u16 pasid;
	struct uacce_queue *uacce_q;
};

/**
 * q_num_set() - module parameter setter validating a queue number
 * @val: parameter string supplied by the user
 * @kp: kernel_param being set
 * @device: PCI device ID of the accelerator to probe for the hw limit
 *
 * Determines the upper queue-number bound from the installed device's
 * hardware revision (QM_QNUM_V1 for V1, QM_QNUM_V2 otherwise); when no
 * device is found, uses the smaller of the two limits as a safe default.
 *
 * Return: 0 on success, -EINVAL if @val is NULL, unparsable or out of
 * the [QM_MIN_QNUM, limit] range.
 */
static inline int q_num_set(const char *val, const struct kernel_param *kp,
			    unsigned int device)
{
	struct pci_dev *pdev;
	u32 n, q_num;
	int ret;

	if (!val)
		return -EINVAL;

	pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
	if (!pdev) {
		q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
		pr_info("No device found currently, suppose queue number is %u\n",
			q_num);
	} else {
		if (pdev->revision == QM_HW_V1)
			q_num = QM_QNUM_V1;
		else
			q_num = QM_QNUM_V2;

		pci_dev_put(pdev);
	}

	ret = kstrtou32(val, 10, &n);
	if (ret || n < QM_MIN_QNUM || n > q_num)
		return -EINVAL;

	return param_set_int(val, kp);
}

/**
 * vfs_num_set() - module parameter setter validating a VF count
 * @val: parameter string supplied by the user
 * @kp: kernel_param being set
 *
 * Accepts values in [0, QM_MAX_VFS_NUM_V2].
 *
 * Return: 0 on success, negative errno on parse failure or -EINVAL
 * for NULL/out-of-range input.
 */
static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret < 0)
		return ret;

	if (n > QM_MAX_VFS_NUM_V2)
		return -EINVAL;

	return param_set_int(val, kp);
}

/**
 * mode_set() - module parameter setter validating the uacce mode
 * @val: parameter string supplied by the user
 * @kp: kernel_param being set
 *
 * Only UACCE_MODE_NOUACCE and UACCE_MODE_SVA are accepted.
 *
 * Return: 0 on success, -EINVAL otherwise.
 */
static inline int mode_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret != 0 || (n != UACCE_MODE_SVA &&
			 n != UACCE_MODE_NOUACCE))
		return -EINVAL;

	return param_set_int(val, kp);
}

/* Thin named wrapper so drivers can expose "uacce_mode" as a parameter */
static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
	return mode_set(val, kp);
}
/**
 * hisi_qm_init_list() - initialize an accelerator's QM list and its lock
 * @qm_list: list head container to initialize
 */
static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
	INIT_LIST_HEAD(&qm_list->list);
	mutex_init(&qm_list->lock);
}

/**
 * hisi_qm_add_list() - append a QM instance to the list under its lock
 * @qm: QM instance to add
 * @qm_list: list to add it to
 */
static inline void hisi_qm_add_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	mutex_lock(&qm_list->lock);
	list_add_tail(&qm->list, &qm_list->list);
	mutex_unlock(&qm_list->lock);
}

/**
 * hisi_qm_del_list() - remove a QM instance from the list under its lock
 * @qm: QM instance to remove
 * @qm_list: list it was added to
 */
static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	mutex_lock(&qm_list->lock);
	list_del(&qm->list);
	mutex_unlock(&qm_list->lock);
}

/* QM lifecycle and queue-pair management */
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
void hisi_qm_stop_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);

/* debugfs support */
void hisi_qm_debug_init(struct hisi_qm *qm);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);

/* SR-IOV enable/disable/configure */
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);

/* device error reporting init/uninit and register diff dumping */
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
			      struct dfx_diff_registers *dregs, u32 reg_len);
void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len);
void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
				struct dfx_diff_registers *dregs, u32 regs_len);

/* PCI error-recovery callbacks */
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
					  pci_channel_state_t state);
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
void hisi_qm_reset_prepare(struct pci_dev *pdev);
void hisi_qm_reset_done(struct pci_dev *pdev);

/* mailbox access */
int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op);

/* hardware scatter-gather list pool helpers */
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
	struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
	u32 index, dma_addr_t *hw_sgl_dma);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
			   struct hisi_acc_hw_sgl *hw_sgl);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
						   u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
			    struct hisi_acc_sgl_pool *pool);

/* queue-pair allocation across the QM list, shutdown and algorithm hooks */
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
			   u8 alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);

/* runtime power management */
int hisi_qm_resume(struct device *dev);
int hisi_qm_suspend(struct device *dev);
void hisi_qm_pm_uninit(struct hisi_qm *qm);
void hisi_qm_pm_init(struct hisi_qm *qm);
int hisi_qm_get_dfx_access(struct hisi_qm *qm);
void hisi_qm_put_dfx_access(struct hisi_qm *qm);
void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);

/* capability lookup and algorithm-string registration */
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
			const struct hisi_qm_cap_info *info_table,
			u32 index, bool is_read);
int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
		     u32 dev_algs_size);

/* Used by VFIO ACC live migration driver */
struct pci_driver *hisi_sec_get_pf_driver(void);
struct pci_driver *hisi_hpre_get_pf_driver(void);
struct pci_driver *hisi_zip_get_pf_driver(void);
#endif
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.