MLX5DV_QUERY_DEVICE(3)      Library Functions Manual      MLX5DV_QUERY_DEVICE(3)

NAME

       mlx5dv_query_device - Query device capabilities specific to mlx5

SYNOPSIS

       #include <infiniband/mlx5dv.h>

       int mlx5dv_query_device(struct ibv_context *ctx_in,
                               struct mlx5dv_context *attrs_out);

DESCRIPTION

       mlx5dv_query_device() queries HW device-specific information that is
       important for the data path but is not provided by
       ibv_query_device(3).

       This function returns the version, flags and compatibility mask. The
       version represents the format of the internal hardware structures
       that mlx5dv.h exposes. Additions of new fields to the existing
       structures are handled by the comp_mask field.

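       The following is only an illustrative sketch of a basic call, not
       part of the API definition; it assumes ctx is an already opened
       struct ibv_context pointer and the variable names are arbitrary:

           struct mlx5dv_context attrs = {0};
           int ret;

           ret = mlx5dv_query_device(ctx, &attrs);
           if (ret)
                   return ret; /* ret is the errno value describing the failure */
           /* attrs.version, attrs.flags and the base fields are now valid */
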
       struct mlx5dv_sw_parsing_caps {
               uint32_t sw_parsing_offloads; /* Use enum mlx5dv_sw_parsing_offloads */
               uint32_t supported_qpts;
       };

       struct mlx5dv_striding_rq_caps {
               uint32_t min_single_stride_log_num_of_bytes; /* min log size of each stride */
               uint32_t max_single_stride_log_num_of_bytes; /* max log size of each stride */
               uint32_t min_single_wqe_log_num_of_strides; /* min log number of strides per WQE */
               uint32_t max_single_wqe_log_num_of_strides; /* max log number of strides per WQE */
               uint32_t supported_qpts;
       };

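       The stride limits are log2-encoded. As an illustrative sketch only,
       assuming the striding RQ capabilities were requested with
       MLX5DV_CONTEXT_MASK_STRIDING_RQ and returned in
       attrs.striding_rq_caps, they can be converted to byte and stride
       counts as follows:

           uint32_t max_stride_bytes =
                   1U << attrs.striding_rq_caps.max_single_stride_log_num_of_bytes;
           uint32_t max_strides_per_wqe =
                   1U << attrs.striding_rq_caps.max_single_wqe_log_num_of_strides;
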
       struct mlx5dv_dci_streams_caps {
               uint8_t max_log_num_concurent; /* max log number of parallel different streams that can be handled by HW */
               uint8_t max_log_num_errored; /* max DCI error stream channels supported per DCI before the DCI moves to an error state */
       };

       struct mlx5dv_context {
               uint8_t         version;
               uint64_t        flags;
               uint64_t        comp_mask; /* Use enum mlx5dv_context_comp_mask */
               struct mlx5dv_cqe_comp_caps     cqe_comp_caps;
               struct mlx5dv_sw_parsing_caps sw_parsing_caps;
               struct mlx5dv_striding_rq_caps striding_rq_caps;
               uint32_t        tunnel_offloads_caps;
               uint32_t        max_dynamic_bfregs; /* max blue-flame registers that can be dynamically allocated */
               uint64_t        max_clock_info_update_nsec;
               uint32_t        flow_action_flags; /* use enum mlx5dv_flow_action_cap_flags */
               uint32_t        dc_odp_caps; /* use enum ibv_odp_transport_cap_bits */
               void            *hca_core_clock; /* points to a memory location that is mapped to the HCA's core clock */
               uint8_t         num_lag_ports;
               struct mlx5dv_sig_caps sig_caps;
               size_t          max_wr_memcpy_length; /* max length that is supported by the DMA memcpy WR */
               struct mlx5dv_dci_streams_caps dci_streams_caps;
               struct mlx5dv_crypto_caps crypto_caps;
       };

       enum mlx5dv_context_flags {
               /*
                * This flag indicates if CQE version 0 or 1 is needed.
                */
               MLX5DV_CONTEXT_FLAGS_CQE_V1 = (1 << 0),
               MLX5DV_CONTEXT_FLAGS_OBSOLETE = (1 << 1), /* Obsolete, don't use */
               MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED = (1 << 2), /* Multi packet WQE is allowed */
               MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW = (1 << 3), /* Enhanced multi packet WQE is supported */
               MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP = (1 << 4), /* Support CQE 128B compression */
               MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD = (1 << 5), /* Support CQE 128B padding */
               MLX5DV_CONTEXT_FLAGS_PACKET_BASED_CREDIT_MODE = (1 << 6), /* Support packet based credit mode in RC QP */
               /*
                * If the CQ was created with IBV_WC_EX_WITH_COMPLETION_TIMESTAMP_WALLCLOCK,
                * CQE timestamps will be in real time format.
                */
               MLX5DV_CONTEXT_FLAGS_REAL_TIME_TS = (1 << 7),
       };

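       As an illustrative sketch only (attrs is the struct mlx5dv_context
       filled in by mlx5dv_query_device()), the returned flags can be
       tested like this:

           if (attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1)
                   /* the device uses CQE version 1 */;
           if (attrs.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW)
                   /* enhanced multi packet WQE may be used */;
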
       enum mlx5dv_context_comp_mask {
               MLX5DV_CONTEXT_MASK_CQE_COMPRESION      = 1 << 0,
               MLX5DV_CONTEXT_MASK_SWP                 = 1 << 1,
               MLX5DV_CONTEXT_MASK_STRIDING_RQ         = 1 << 2,
               MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS     = 1 << 3,
               MLX5DV_CONTEXT_MASK_DYN_BFREGS          = 1 << 4,
               MLX5DV_CONTEXT_MASK_CLOCK_INFO_UPDATE   = 1 << 5,
               MLX5DV_CONTEXT_MASK_FLOW_ACTION_FLAGS   = 1 << 6,
               MLX5DV_CONTEXT_MASK_DC_ODP_CAPS         = 1 << 7,
               MLX5DV_CONTEXT_MASK_HCA_CORE_CLOCK      = 1 << 8,
               MLX5DV_CONTEXT_MASK_NUM_LAG_PORTS       = 1 << 9,
               MLX5DV_CONTEXT_MASK_SIGNATURE_OFFLOAD   = 1 << 10,
               MLX5DV_CONTEXT_MASK_DCI_STREAMS         = 1 << 11,
               MLX5DV_CONTEXT_MASK_WR_MEMCPY_LENGTH    = 1 << 12,
               MLX5DV_CONTEXT_MASK_CRYPTO_OFFLOAD      = 1 << 13,
       };

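       comp_mask is an in/out field: the caller sets the bits of the
       optional capabilities it is interested in, and on return only the
       bits that were actually filled in remain set. An illustrative sketch
       of this handshake (ctx and the variable names are assumptions, not
       part of the API):

           struct mlx5dv_context attrs = {0};

           attrs.comp_mask = MLX5DV_CONTEXT_MASK_STRIDING_RQ |
                             MLX5DV_CONTEXT_MASK_CLOCK_INFO_UPDATE;
           if (!mlx5dv_query_device(ctx, &attrs) &&
               (attrs.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ))
                   /* the striding RQ capabilities were filled in */;
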
       enum mlx5dv_sw_parsing_offloads {
               MLX5DV_SW_PARSING         = 1 << 0,
               MLX5DV_SW_PARSING_CSUM    = 1 << 1,
               MLX5DV_SW_PARSING_LSO     = 1 << 2,
       };

       enum mlx5dv_tunnel_offloads {
               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN  = 1 << 0,
               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE    = 1 << 1,
               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
       };

       enum mlx5dv_flow_action_cap_flags {
               MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM                = 1 << 0, /* Flow action ESP (with AES_GCM keymat) is supported */
               MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA   = 1 << 1, /* Flow action ESP always returns metadata in the payload */
               MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING   = 1 << 2, /* ESP (with AES_GCM keymat) supports matching by SPI (rather than hashing against SPI) */
               MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD   = 1 << 3, /* Flow action ESP supports full offload (with AES_GCM keymat) */
               MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN   = 1 << 4, /* Flow action ESP (with AES_GCM keymat), ESN comes implicitly from IV */
       };

       struct mlx5dv_sig_caps {
               uint64_t block_size; /* use enum mlx5dv_block_size_caps */
               uint32_t block_prot; /* use enum mlx5dv_sig_prot_caps */
               uint16_t t10dif_bg; /* use enum mlx5dv_sig_t10dif_bg_caps */
               uint16_t crc_type; /* use enum mlx5dv_sig_crc_type_caps */
       };

       enum mlx5dv_sig_prot_caps {
               MLX5DV_SIG_PROT_CAP_T10DIF = 1 << MLX5DV_SIG_TYPE_T10DIF,
               MLX5DV_SIG_PROT_CAP_CRC = 1 << MLX5DV_SIG_TYPE_CRC,
       };

       enum mlx5dv_sig_t10dif_bg_caps {
               MLX5DV_SIG_T10DIF_BG_CAP_CRC = 1 << MLX5DV_SIG_T10DIF_CRC,
               MLX5DV_SIG_T10DIF_BG_CAP_CSUM = 1 << MLX5DV_SIG_T10DIF_CSUM,
       };

       enum mlx5dv_sig_crc_type_caps {
               MLX5DV_SIG_CRC_TYPE_CAP_CRC32 = 1 << MLX5DV_SIG_CRC_TYPE_CRC32,
               MLX5DV_SIG_CRC_TYPE_CAP_CRC32C = 1 << MLX5DV_SIG_CRC_TYPE_CRC32C,
               MLX5DV_SIG_CRC_TYPE_CAP_CRC64_XP10 = 1 << MLX5DV_SIG_CRC_TYPE_CRC64_XP10,
       };

       enum mlx5dv_block_size_caps {
               MLX5DV_BLOCK_SIZE_CAP_512 = 1 << MLX5DV_BLOCK_SIZE_512,
               MLX5DV_BLOCK_SIZE_CAP_520 = 1 << MLX5DV_BLOCK_SIZE_520,
               MLX5DV_BLOCK_SIZE_CAP_4048 = 1 << MLX5DV_BLOCK_SIZE_4048,
               MLX5DV_BLOCK_SIZE_CAP_4096 = 1 << MLX5DV_BLOCK_SIZE_4096,
               MLX5DV_BLOCK_SIZE_CAP_4160 = 1 << MLX5DV_BLOCK_SIZE_4160,
       };

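       As an illustrative sketch only, assuming the signature capabilities
       were requested with MLX5DV_CONTEXT_MASK_SIGNATURE_OFFLOAD and
       returned in attrs.sig_caps, a specific combination can be tested
       like this:

           if ((attrs.sig_caps.crc_type & MLX5DV_SIG_CRC_TYPE_CAP_CRC32) &&
               (attrs.sig_caps.block_size & MLX5DV_BLOCK_SIZE_CAP_512))
                   /* CRC32 signature offload over 512-byte blocks is supported */;
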
       struct mlx5dv_crypto_caps {
               /*
                * If failed_selftests != 0, some self-tests have failed and
                * specific crypto engines may be unusable. Consult NVIDIA for
                * the exact meaning of the code.
                */
               uint16_t failed_selftests;
               uint8_t crypto_engines; /* use enum mlx5dv_crypto_engines_caps */
               uint8_t wrapped_import_method; /* use enum mlx5dv_crypto_wrapped_import_method_caps */
               uint8_t log_max_num_deks;
               uint32_t flags; /* use enum mlx5dv_crypto_caps_flags */
       };

       enum mlx5dv_crypto_engines_caps {
               MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS = 1 << 0,
       };

       enum mlx5dv_crypto_wrapped_import_method_caps {
               MLX5DV_CRYPTO_WRAPPED_IMPORT_METHOD_CAP_AES_XTS = 1 << 0,
       };

       enum mlx5dv_crypto_caps_flags {
               /* Indicates whether crypto capabilities are enabled on the device. */
               MLX5DV_CRYPTO_CAPS_CRYPTO = 1 << 0,

               /* Indicates whether crypto engines that are in wrapped import method are operational. */
               MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_OPERATIONAL = 1 << 1,

               /*
                * If set, indicates that after the next FW reset the device will go back to
                * commissioning mode, meaning that MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_OPERATIONAL
                * will be set to 0.
                */
               MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_GOING_TO_COMMISSIONING = 1 << 2,
       };

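       As an illustrative sketch only, assuming the crypto capabilities
       were requested with MLX5DV_CONTEXT_MASK_CRYPTO_OFFLOAD and returned
       in attrs.crypto_caps, AES-XTS availability can be tested like this:

           if ((attrs.crypto_caps.flags & MLX5DV_CRYPTO_CAPS_CRYPTO) &&
               (attrs.crypto_caps.crypto_engines & MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS) &&
               attrs.crypto_caps.failed_selftests == 0)
                   /* AES-XTS crypto offload appears usable on this device */;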

RETURN VALUE

       0 on success or the value of errno on failure (which indicates the
       failure reason).

NOTES

        * The compatibility mask (comp_mask) is an in/out field.

SEE ALSO

       mlx5dv(7), ibv_query_device(3)

AUTHORS

       Leon Romanovsky <leonro@mellanox.com>

1.0.0                             2017-02-02            MLX5DV_QUERY_DEVICE(3)