/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */

#ifndef MLX5_CORE_EQ_H
#define MLX5_CORE_EQ_H

#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_NUM_CMD_EQE   (32)
#define MLX5_NUM_ASYNC_EQE (0x1000)
#define MLX5_NUM_SPARE_EQE (0x80)

struct mlx5_eq;
struct mlx5_core_dev;

struct mlx5_eq_param {
	u8             irq_index;
	int            nent;
	u64            mask[4];
	cpumask_var_t  affinity;
};

struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param);
int
mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb);
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb);

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);

/* The HCA will think the queue has overflowed if we
 * don't tell it we've been processing events.  We
 * create EQs with MLX5_NUM_SPARE_EQE extra entries,
 * so we must update our consumer index at
 * least that often.
 *
 * mlx5_eq_update_cc() must be called for every EQE in the EQ's irq handler.
 */
static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
{
	if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
		mlx5_eq_update_ci(eq, cc, 0);
		cc = 0;
	}
	return cc;
}

struct mlx5_nb {
	struct notifier_block nb;
	u8 event_type;
};

#define mlx5_nb_cof(ptr, type, member) \
	(container_of(container_of(ptr, struct mlx5_nb, nb), type, member))

#define MLX5_NB_INIT(name, handler, event) do {			\
	(name)->nb.notifier_call = handler;			\
	(name)->event_type = MLX5_EVENT_TYPE_##event;		\
} while (0)

#endif /* MLX5_CORE_EQ_H */
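/*
 * Example usage (not part of the header above): a minimal sketch, assuming a
 * consumer that wants its own generic EQ.  Every identifier prefixed with
 * "my_" is a made-up name for illustration; only the mlx5_* identifiers come
 * from linux/mlx5/eq.h, linux/mlx5/driver.h and linux/mlx5/device.h, and
 * details such as the affinity handling may vary between kernel versions.
 * The drain loop follows the rule documented at mlx5_eq_update_cc(): refresh
 * the consumer index at least every MLX5_NUM_SPARE_EQE events, then do a
 * final update and re-arm with mlx5_eq_update_ci().
 */
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>

struct my_eq {
	struct mlx5_eq *core;
	struct notifier_block irq_nb;
};

/* Drain the EQ, acknowledging often enough that the HCA never sees overflow. */
static void my_eq_process(struct my_eq *eq)
{
	struct mlx5_eqe *eqe;
	int cc = 0;

	while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
		/* ... handle eqe->type / eqe->data here ... */
		cc = mlx5_eq_update_cc(eq->core, ++cc);
	}
	mlx5_eq_update_ci(eq->core, cc, 1);	/* final update, re-arm the EQ */
}

/* Interrupt notifier registered through mlx5_eq_enable(). */
static int my_eq_int(struct notifier_block *nb, unsigned long type, void *data)
{
	struct my_eq *eq = container_of(nb, struct my_eq, irq_nb);

	my_eq_process(eq);
	return NOTIFY_OK;
}

static int my_eq_init(struct mlx5_core_dev *dev, struct my_eq *eq)
{
	struct mlx5_eq_param param = {
		.irq_index = 0,
		.nent = 64,		/* arbitrary example queue depth */
	};
	int err;

	/* Subscribe this EQ to page-fault events (example event type only). */
	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;

	/* Affinity hint for the EQ's IRQ; left empty in this sketch. */
	if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL))
		return -ENOMEM;

	eq->irq_nb.notifier_call = my_eq_int;
	eq->core = mlx5_eq_create_generic(dev, &param);
	free_cpumask_var(param.affinity);
	if (IS_ERR(eq->core))
		return PTR_ERR(eq->core);

	err = mlx5_eq_enable(dev, eq->core, &eq->irq_nb);
	if (err)
		mlx5_eq_destroy_generic(dev, eq->core);
	return err;
}

static void my_eq_cleanup(struct mlx5_core_dev *dev, struct my_eq *eq)
{
	mlx5_eq_disable(dev, eq->core, &eq->irq_nb);
	mlx5_eq_destroy_generic(dev, eq->core);
}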
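/*
 * A second sketch, also hypothetical and reusing the includes above: how the
 * mlx5_nb wrapper plus the MLX5_NB_INIT()/mlx5_nb_cof() helpers from the
 * header are paired with mlx5_notifier_register() (declared in
 * linux/mlx5/driver.h) to listen for one specific event type.  "my_ctx" and
 * my_port_event() are made-up names; MLX5_EVENT_TYPE_PORT_CHANGE is a real
 * event type from linux/mlx5/device.h, used here only as an example.
 */
struct my_ctx {
	struct mlx5_core_dev *mdev;
	struct mlx5_nb port_nb;
};

static int my_port_event(struct notifier_block *nb, unsigned long type,
			 void *data)
{
	/* Recover the enclosing context from the embedded mlx5_nb. */
	struct my_ctx *ctx = mlx5_nb_cof(nb, struct my_ctx, port_nb);
	struct mlx5_eqe *eqe = data;

	/* ... react to the port-change EQE, e.g. inspect eqe->sub_type ... */
	(void)ctx;
	(void)eqe;
	return NOTIFY_OK;
}

static int my_ctx_register_events(struct my_ctx *ctx)
{
	/* Sets ctx->port_nb.nb.notifier_call and ctx->port_nb.event_type. */
	MLX5_NB_INIT(&ctx->port_nb, my_port_event, PORT_CHANGE);
	return mlx5_notifier_register(ctx->mdev, &ctx->port_nb.nb);
}

static void my_ctx_unregister_events(struct my_ctx *ctx)
{
	mlx5_notifier_unregister(ctx->mdev, &ctx->port_nb.nb);
}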