IBV_QUERY_DEVICE_EX(3)  Libibverbs Programmer's Manual  IBV_QUERY_DEVICE_EX(3)

NAME
       ibv_query_device_ex - query an RDMA device's attributes

SYNOPSIS
       #include <infiniband/verbs.h>

       int ibv_query_device_ex(struct ibv_context *context,
                               struct ibv_device_attr_ex *attr);

DESCRIPTION
       ibv_query_device_ex() returns the attributes of the device with
       context context.  The argument attr is a pointer to an
       ibv_device_attr_ex struct, as defined in <infiniband/verbs.h>.

       struct ibv_device_attr_ex {
               struct ibv_device_attr orig_attr;
               uint32_t comp_mask;                 /* Compatibility mask that defines which of the following variables are valid */
               struct ibv_odp_caps odp_caps;       /* On-Demand Paging capabilities */
               uint64_t completion_timestamp_mask; /* Completion timestamp mask (0 = unsupported) */
               uint64_t hca_core_clock;            /* The frequency (in kHz) of the HCA (0 = unsupported) */
               uint64_t device_cap_flags_ex;       /* Extended device capability flags */
               struct ibv_tso_caps tso_caps;       /* TCP segmentation offload capabilities */
               struct ibv_rss_caps rss_caps;       /* RSS capabilities */
               uint32_t max_wq_type_rq;            /* Max number of Work Queues of type RQ */
               struct ibv_packet_pacing_caps packet_pacing_caps; /* Packet pacing capabilities */
               uint32_t raw_packet_caps;           /* Raw packet capabilities, use enum ibv_raw_packet_caps */
               struct ibv_tm_caps tm_caps;         /* Tag matching capabilities */
               struct ibv_cq_moderation_caps cq_mod_caps; /* CQ moderation max capabilities */
       };
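
       A minimal usage sketch, following the two-argument synopsis above
       (the helper name print_device_caps is illustrative, not part of the
       API; newer libibverbs releases insert a struct
       ibv_query_device_ex_input pointer as a second argument, where NULL
       may be passed):

       #include <inttypes.h>
       #include <stdio.h>
       #include <string.h>
       #include <infiniband/verbs.h>

       int print_device_caps(struct ibv_context *ctx)
       {
               struct ibv_device_attr_ex attr;
               int err;

               memset(&attr, 0, sizeof(attr));
               err = ibv_query_device_ex(ctx, &attr);
               if (err)
                       return err; /* errno value indicating the failure reason */

               printf("max QPs:        %d\n", attr.orig_attr.max_qp);
               printf("HCA core clock: %" PRIu64 " kHz\n", attr.hca_core_clock);
               printf("timestamp mask: 0x%" PRIx64 "\n",
                      attr.completion_timestamp_mask);
               return 0;
       }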

       struct ibv_odp_caps {
               uint64_t general_odp_caps; /* Mask with enum ibv_odp_general_cap_bits */
               struct {
                       uint32_t rc_odp_caps; /* Mask with enum ibv_odp_transport_cap_bits to know which operations are supported */
                       uint32_t uc_odp_caps; /* Mask with enum ibv_odp_transport_cap_bits to know which operations are supported */
                       uint32_t ud_odp_caps; /* Mask with enum ibv_odp_transport_cap_bits to know which operations are supported */
               } per_transport_caps;
       };

       enum ibv_odp_general_cap_bits {
               IBV_ODP_SUPPORT = 1 << 0, /* On-demand paging is supported */
       };

       enum ibv_odp_transport_cap_bits {
               IBV_ODP_SUPPORT_SEND   = 1 << 0, /* Send operations support on-demand paging */
               IBV_ODP_SUPPORT_RECV   = 1 << 1, /* Receive operations support on-demand paging */
               IBV_ODP_SUPPORT_WRITE  = 1 << 2, /* RDMA-Write operations support on-demand paging */
               IBV_ODP_SUPPORT_READ   = 1 << 3, /* RDMA-Read operations support on-demand paging */
               IBV_ODP_SUPPORT_ATOMIC = 1 << 4, /* RDMA-Atomic operations support on-demand paging */
       };
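
       For example, a caller might verify that RC send and receive
       operations support on-demand paging before registering an ODP
       memory region (a sketch; attr is the ibv_device_attr_ex filled in
       by ibv_query_device_ex()):

       int rc_odp_send_recv_ok(const struct ibv_device_attr_ex *attr)
       {
               const struct ibv_odp_caps *odp = &attr->odp_caps;
               uint32_t want = IBV_ODP_SUPPORT_SEND | IBV_ODP_SUPPORT_RECV;

               if (!(odp->general_odp_caps & IBV_ODP_SUPPORT))
                       return 0; /* no on-demand paging at all */

               return (odp->per_transport_caps.rc_odp_caps & want) == want;
       }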

       struct ibv_tso_caps {
               uint32_t max_tso;        /* Maximum payload size in bytes supported for segmentation by the TSO engine */
               uint32_t supported_qpts; /* Bitmap showing which QP types are supported by the TSO operation */
       };
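
       supported_qpts is typically tested with a 1 << IBV_QPT_* convention
       (an assumption about the bitmap layout; raw packet QPs are the usual
       consumer of TSO):

       if (attr.tso_caps.max_tso > 0 &&
           (attr.tso_caps.supported_qpts & (1 << IBV_QPT_RAW_PACKET))) {
               /* TSO may be requested on raw packet QPs, e.g. via
                  IBV_QP_INIT_ATTR_MAX_TSO_HEADER at QP creation */
       }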

       struct ibv_rss_caps {
               uint32_t supported_qpts;                 /* Bitmap showing which QP types support RSS */
               uint32_t max_rwq_indirection_tables;     /* Max receive work queue indirection tables */
               uint32_t max_rwq_indirection_table_size; /* Max receive work queue indirection table size */
               uint64_t rx_hash_fields_mask;            /* Mask with enum ibv_rx_hash_fields to know which fields of an incoming packet can participate in the RX hash */
               uint8_t rx_hash_function;                /* Mask with enum ibv_rx_hash_function_flags to know which hash functions are supported */
       };
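
       A sketch of checking for Toeplitz hashing over the IPv4 source and
       destination addresses (IBV_RX_HASH_FUNC_TOEPLITZ and the
       IBV_RX_HASH_* field bits are defined in <infiniband/verbs.h>):

       int rss_ipv4_toeplitz_ok(const struct ibv_device_attr_ex *attr)
       {
               const struct ibv_rss_caps *rss = &attr->rss_caps;
               uint64_t want = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;

               return (rss->rx_hash_function & IBV_RX_HASH_FUNC_TOEPLITZ) &&
                      (rss->rx_hash_fields_mask & want) == want;
       }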

       struct ibv_packet_pacing_caps {
               uint32_t qp_rate_limit_min; /* Minimum rate limit in kbps */
               uint32_t qp_rate_limit_max; /* Maximum rate limit in kbps */
               uint32_t supported_qpts;    /* Bitmap showing which QP types are supported */
       };
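
       A sketch of clamping a requested rate to the advertised range, e.g.
       before calling ibv_modify_qp_rate_limit(3) (rate_kbps is a
       caller-chosen value; all-zero caps are assumed to mean packet
       pacing is unsupported):

       if (attr.packet_pacing_caps.qp_rate_limit_max == 0)
               return ENOTSUP; /* assumed: no packet pacing support */

       if (rate_kbps < attr.packet_pacing_caps.qp_rate_limit_min)
               rate_kbps = attr.packet_pacing_caps.qp_rate_limit_min;
       if (rate_kbps > attr.packet_pacing_caps.qp_rate_limit_max)
               rate_kbps = attr.packet_pacing_caps.qp_rate_limit_max;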

       enum ibv_raw_packet_caps {
               IBV_RAW_PACKET_CAP_CVLAN_STRIPPING = 1 << 0, /* CVLAN stripping is supported */
               IBV_RAW_PACKET_CAP_SCATTER_FCS     = 1 << 1, /* FCS scattering is supported */
               IBV_RAW_PACKET_CAP_IP_CSUM         = 1 << 2, /* IP CSUM offload is supported */
       };
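
       For example, before creating a raw packet QP that depends on these
       offloads (required is caller policy, not part of the API):

       uint32_t required = IBV_RAW_PACKET_CAP_CVLAN_STRIPPING |
                           IBV_RAW_PACKET_CAP_IP_CSUM;

       if ((attr.raw_packet_caps & required) != required)
               fprintf(stderr, "missing required raw packet offloads\n");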

       enum ibv_tm_cap_flags {
               IBV_TM_CAP_RC = 1 << 0, /* Support tag matching on RC transport */
       };

       struct ibv_tm_caps {
               uint32_t max_rndv_hdr_size; /* Max size of rendezvous request header */
               uint32_t max_num_tags;      /* Max number of tagged buffers in a TM-SRQ matching list */
               uint32_t flags;             /* From enum ibv_tm_cap_flags */
               uint32_t max_ops;           /* Max number of outstanding list operations */
               uint32_t max_sge;           /* Max number of SGEs in a tagged buffer */
       };

       struct ibv_cq_moderation_caps {
               uint16_t max_cq_count;  /* Max supported CQ moderation count (completions per moderation event) */
               uint16_t max_cq_period; /* Max supported CQ moderation period, in microseconds */
       };
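
       A sketch of applying CQ moderation within the advertised limits,
       assuming the ibv_modify_cq(3) interface (the desired values 16 and
       64 are arbitrary):

       struct ibv_modify_cq_attr cq_attr;
       uint16_t count  = 16; /* completions per moderation event */
       uint16_t period = 64; /* period, in microseconds */

       if (count > attr.cq_mod_caps.max_cq_count)
               count = attr.cq_mod_caps.max_cq_count;
       if (period > attr.cq_mod_caps.max_cq_period)
               period = attr.cq_mod_caps.max_cq_period;

       memset(&cq_attr, 0, sizeof(cq_attr));
       cq_attr.attr_mask          = IBV_CQ_ATTR_MODERATE;
       cq_attr.moderate.cq_count  = count;
       cq_attr.moderate.cq_period = period;
       ret = ibv_modify_cq(cq, &cq_attr);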

       Extended device capability flags (device_cap_flags_ex):

       IBV_DEVICE_PCI_WRITE_END_PADDING

              Indicates the device supports padding PCI writes to a full
              cache line.

              Padding packets to full cache lines reduces the amount of
              traffic required at the memory controller at the expense of
              creating more traffic on the PCI-E port.

              Workloads that have a high CPU memory load and low PCI-E
              utilization will benefit from this feature, while workloads
              that have a high PCI-E utilization and small packets will be
              harmed.

              For instance, with a 128 byte cache line size, the transfer
              of any packet smaller than 128 bytes will require a full
              128 byte transfer on PCI-E, potentially doubling the required
              PCI-E bandwidth.

              This feature can be enabled on a QP or WQ basis via the
              IBV_QP_CREATE_PCI_WRITE_END_PADDING or
              IBV_WQ_FLAGS_PCI_WRITE_END_PADDING flags, as sketched below.
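
       A hedged sketch of requesting end padding at QP creation when the
       device advertises it (attr is the ibv_device_attr_ex filled in by
       ibv_query_device_ex(); the surrounding QP setup is abbreviated):

       struct ibv_qp_init_attr_ex qp_attr;

       memset(&qp_attr, 0, sizeof(qp_attr));
       /* ... fill in pd, send_cq, recv_cq, cap, qp_type, ... */
       qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;

       if (attr.device_cap_flags_ex & IBV_DEVICE_PCI_WRITE_END_PADDING) {
               qp_attr.comp_mask    |= IBV_QP_INIT_ATTR_CREATE_FLAGS;
               qp_attr.create_flags |= IBV_QP_CREATE_PCI_WRITE_END_PADDING;
       }

       qp = ibv_create_qp_ex(ctx, &qp_attr);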

RETURN VALUE
       ibv_query_device_ex() returns 0 on success, or the value of errno on
       failure (which indicates the failure reason).

NOTES
       The maximum values returned by this function are the upper limits of
       resources supported by the device.  However, it may not be possible
       to use these maximum values, since the actual number of any resource
       that can be created may be limited by the machine configuration, the
       amount of host memory, user permissions, and the amount of resources
       already in use by other users/processes.

SEE ALSO
       ibv_query_device(3), ibv_open_device(3), ibv_query_port(3),
       ibv_query_pkey(3), ibv_query_gid(3)

AUTHORS
       Majd Dibbiny <majd@mellanox.com>


libibverbs                        2014-12-17            IBV_QUERY_DEVICE_EX(3)