MLX5DV_QUERY_DEVICE(3)     Library Functions Manual     MLX5DV_QUERY_DEVICE(3)

NAME

       mlx5dv_query_device - Query device capabilities specific to mlx5

SYNOPSIS

       #include <infiniband/mlx5dv.h>

       int mlx5dv_query_device(struct ibv_context *ctx_in,
                               struct mlx5dv_context *attrs_out);

DESCRIPTION

       mlx5dv_query_device() queries hardware device-specific information
       that is important for the data path but is not provided by
       ibv_query_device(3).

       This function returns the version, flags and compatibility mask.  The
       version represents the format of the internal hardware structures
       described by mlx5dv.h.  Additions of new fields to the existing
       structures are handled through the comp_mask field.

       struct mlx5dv_sw_parsing_caps {
               uint32_t sw_parsing_offloads; /* Use enum mlx5dv_sw_parsing_offloads */
               uint32_t supported_qpts;
       };

       struct mlx5dv_striding_rq_caps {
               uint32_t min_single_stride_log_num_of_bytes; /* min log size of each stride */
               uint32_t max_single_stride_log_num_of_bytes; /* max log size of each stride */
               uint32_t min_single_wqe_log_num_of_strides; /* min log number of strides per WQE */
               uint32_t max_single_wqe_log_num_of_strides; /* max log number of strides per WQE */
               uint32_t supported_qpts;
       };

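       The striding RQ limits are reported as base-2 logarithms.  A minimal
       sketch of turning them into byte and stride counts, assuming attrs is
       a struct mlx5dv_context filled in by a successful query with
       MLX5DV_CONTEXT_MASK_STRIDING_RQ requested (the variable names are
       illustrative only):

       uint32_t min_stride_bytes =
               1U << attrs.striding_rq_caps.min_single_stride_log_num_of_bytes;
       uint32_t max_stride_bytes =
               1U << attrs.striding_rq_caps.max_single_stride_log_num_of_bytes;
       uint32_t max_strides_per_wqe =
               1U << attrs.striding_rq_caps.max_single_wqe_log_num_of_strides;
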
       struct mlx5dv_context {
               uint8_t         version;
               uint64_t        flags;
               uint64_t        comp_mask; /* Use enum mlx5dv_context_comp_mask */
               struct mlx5dv_cqe_comp_caps     cqe_comp_caps;
               struct mlx5dv_sw_parsing_caps   sw_parsing_caps;
               struct mlx5dv_striding_rq_caps  striding_rq_caps;
               uint32_t        tunnel_offloads_caps;
               uint32_t        max_dynamic_bfregs; /* max blue-flame registers that can be dynamically allocated */
               uint64_t        max_clock_info_update_nsec;
               uint32_t        flow_action_flags; /* use enum mlx5dv_flow_action_cap_flags */
               uint32_t        dc_odp_caps; /* use enum ibv_odp_transport_cap_bits */
               void            *hca_core_clock; /* points to a memory location that is mapped to the HCA's core clock */
               uint32_t        num_lag_ports; /* number of LAG ports */
       };

       enum mlx5dv_context_flags {
               /*
                * This flag indicates if CQE version 0 or 1 is needed.
                */
               MLX5DV_CONTEXT_FLAGS_CQE_V1 = (1 << 0),
               MLX5DV_CONTEXT_FLAGS_OBSOLETE     = (1 << 1), /* Obsoleted, don't use */
               MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED  = (1 << 2), /* Multi packet WQE is allowed */
               MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW = (1 << 3), /* Enhanced multi packet WQE is supported */
               MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP = (1 << 4), /* Support CQE 128B compression */
               MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD  = (1 << 5), /* Support CQE 128B padding */
               MLX5DV_CONTEXT_FLAGS_PACKET_BASED_CREDIT_MODE = (1 << 6), /* Support packet based credit mode in RC QP */
       };

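       A data-path implementation typically branches on these bits.  A
       minimal sketch, assuming attrs was filled in by a successful
       mlx5dv_query_device() call (variable names are illustrative only):

       int cqe_v1      = !!(attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1);
       int mpw_allowed = !!(attrs.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED);
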
       enum mlx5dv_context_comp_mask {
               MLX5DV_CONTEXT_MASK_CQE_COMPRESION      = 1 << 0,
               MLX5DV_CONTEXT_MASK_SWP                 = 1 << 1,
               MLX5DV_CONTEXT_MASK_STRIDING_RQ         = 1 << 2,
               MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS     = 1 << 3,
               MLX5DV_CONTEXT_MASK_DYN_BFREGS          = 1 << 4,
               MLX5DV_CONTEXT_MASK_CLOCK_INFO_UPDATE   = 1 << 5,
               MLX5DV_CONTEXT_MASK_FLOW_ACTION_FLAGS   = 1 << 6,
               MLX5DV_CONTEXT_MASK_DC_ODP_CAPS         = 1 << 7,
               MLX5DV_CONTEXT_MASK_HCA_CORE_CLOCK      = 1 << 8,
               MLX5DV_CONTEXT_MASK_NUM_LAG_PORTS       = 1 << 9,
       };

       enum mlx5dv_sw_parsing_offloads {
               MLX5DV_SW_PARSING         = 1 << 0,
               MLX5DV_SW_PARSING_CSUM    = 1 << 1,
               MLX5DV_SW_PARSING_LSO     = 1 << 2,
       };

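       A sketch of checking whether software parsing with checksum offload
       can be used on a raw packet QP; this assumes that supported_qpts is a
       bitmask keyed by enum ibv_qp_type values and that
       MLX5DV_CONTEXT_MASK_SWP was requested:

       int swp_csum_on_raw_qp =
               (attrs.sw_parsing_caps.sw_parsing_offloads & MLX5DV_SW_PARSING_CSUM) &&
               (attrs.sw_parsing_caps.supported_qpts & (1 << IBV_QPT_RAW_PACKET));
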
       enum mlx5dv_tunnel_offloads {
               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN  = 1 << 0,
               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE    = 1 << 1,
               MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
       };

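       Similarly, a capability check before relying on VXLAN stateless
       offloads might look like the sketch below (assuming
       MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS was requested):

       if (attrs.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) {
               /* VXLAN tunnel offloads may be used on raw packet QPs */
       }
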
       enum mlx5dv_flow_action_cap_flags {
               MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM                = 1 << 0, /* Flow action ESP (with AES_GCM keymat) is supported */
               MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA   = 1 << 1, /* Flow action ESP always returns metadata in the payload */
               MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING   = 1 << 2, /* ESP (with AES_GCM keymat) supports matching by SPI (rather than hashing against SPI) */
               MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD   = 1 << 3, /* Flow action ESP supports full offload (with AES_GCM keymat) */
               MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN   = 1 << 4, /* Flow action ESP (with AES_GCM keymat), ESN comes implicitly from IV */
       };

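       Putting it together, a typical caller requests the optional capability
       sections it understands through comp_mask and then examines the
       returned fields.  The following is a minimal sketch, not a complete
       program; ctx is an open struct ibv_context and use_striding_rq() is a
       hypothetical helper:

       struct mlx5dv_context attrs = {0};
       int ret;

       /* Request the optional capability sections of interest. */
       attrs.comp_mask = MLX5DV_CONTEXT_MASK_CQE_COMPRESION |
                         MLX5DV_CONTEXT_MASK_STRIDING_RQ |
                         MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;

       ret = mlx5dv_query_device(ctx, &attrs);
       if (ret)
               return ret; /* errno value describing the failure */

       /* Only sections whose bit is still set in comp_mask were filled in. */
       if (attrs.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ)
               use_striding_rq(&attrs.striding_rq_caps); /* hypothetical helper */

       if (attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1) {
               /* CQEs are delivered in the version 1 format */
       }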

RETURN VALUE

       0 on success or the value of errno on failure (which indicates the
       failure reason).

NOTES

        * The compatibility mask (comp_mask) is an in/out field: the caller
          sets the bits of the optional sections it wants reported, and on
          return only the bits for the sections that were actually filled in
          remain set.

SEE ALSO

       mlx5dv(7), ibv_query_device(3)

AUTHORS

       Leon Romanovsky <leonro@mellanox.com>

1.0.0                             2017-02-02            MLX5DV_QUERY_DEVICE(3)