LIBCXL(3)                                                            LIBCXL(3)


NAME
libcxl - A library to interact with CXL devices through sysfs(5) and
ioctl(2) interfaces

SYNOPSIS
#include <cxl/libcxl.h>
cc ... -lcxl

DESCRIPTION
libcxl provides interfaces to interact with CXL devices in Linux, using
sysfs interfaces for most kernel interactions, and the ioctl()
interface for command submission.

The starting point for all library interfaces is a cxl_ctx object,
returned by cxl_new(3). CXL Type 3 memory devices and other CXL device
objects are descendants of the cxl_ctx object, and can be iterated via
an iterator API of the form
cxl_<object>_foreach(<parent object>, <object iterator>).

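For example, a minimal enumeration loop looks like the following sketch,
using the cxl_memdev_foreach() iterator described in the MEMDEVS section
below; cxl_unref() is assumed here to be the matching teardown call for
cxl_new(), and error handling is trimmed:

    #include <stdio.h>
    #include <cxl/libcxl.h>

    int main(void)
    {
            struct cxl_ctx *ctx;
            struct cxl_memdev *memdev;

            /* create the library context that anchors all other objects */
            if (cxl_new(&ctx))
                    return 1;

            /* walk every CXL memory expander known to the kernel */
            cxl_memdev_foreach(ctx, memdev)
                    printf("found %s\n", cxl_memdev_get_devname(memdev));

            cxl_unref(ctx);
            return 0;
    }
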
MEMDEVS
The object representing a CXL memory expander (Type 3 device) is struct
cxl_memdev. Library interfaces related to these devices have the prefix
cxl_memdev_. These interfaces are mostly associated with sysfs
interactions (unless otherwise noted in their respective documentation
sections). They are typically used to retrieve data published by the
kernel, or to send data or trigger kernel operations for a given
device.

MEMDEV: Enumeration
struct cxl_memdev *cxl_memdev_get_first(struct cxl_ctx *ctx);
struct cxl_memdev *cxl_memdev_get_next(struct cxl_memdev *memdev);
struct cxl_ctx *cxl_memdev_get_ctx(struct cxl_memdev *memdev);
const char *cxl_memdev_get_host(struct cxl_memdev *memdev);
struct cxl_memdev *cxl_endpoint_get_memdev(struct cxl_endpoint *endpoint);

#define cxl_memdev_foreach(ctx, memdev) \
        for (memdev = cxl_memdev_get_first(ctx); \
             memdev != NULL; \
             memdev = cxl_memdev_get_next(memdev))

CXL memdev instances are enumerated from the global library context
struct cxl_ctx. By default a memdev only offers a portal to submit
memory device commands; see the port, decoder, and endpoint APIs to
determine what, if any, CXL Memory Resources are reachable given a
specific memdev.

The host of a memdev is the PCIe Endpoint device that registered its
CXL capabilities with the Linux CXL core.

MEMDEV: Attributes
int cxl_memdev_get_id(struct cxl_memdev *memdev);
unsigned long long cxl_memdev_get_serial(struct cxl_memdev *memdev);
const char *cxl_memdev_get_devname(struct cxl_memdev *memdev);
int cxl_memdev_get_major(struct cxl_memdev *memdev);
int cxl_memdev_get_minor(struct cxl_memdev *memdev);
unsigned long long cxl_memdev_get_pmem_size(struct cxl_memdev *memdev);
unsigned long long cxl_memdev_get_ram_size(struct cxl_memdev *memdev);
const char *cxl_memdev_get_firmware_version(struct cxl_memdev *memdev);
size_t cxl_memdev_get_label_size(struct cxl_memdev *memdev);
int cxl_memdev_nvdimm_bridge_active(struct cxl_memdev *memdev);
int cxl_memdev_get_numa_node(struct cxl_memdev *memdev);

A memdev is given a kernel device name of the form "mem%d" where an id
(cxl_memdev_get_id()) is dynamically allocated as devices are
discovered. Note that there are no guarantees that ids / kernel device
names for memdevs are stable from one boot to the next, as devices are
enumerated asynchronously. If a stable identifier is needed, use
cxl_memdev_get_serial(), which returns a value according to the Device
Serial Number Extended Capability in the PCIe 5.0 Base Specification.

The character device node for command submission can be found by
default at /dev/cxl/mem%d, or created with a major / minor returned
from cxl_memdev_get_{major,minor}().

The pmem_size and ram_size attributes return the current provisioning
of DPA (Device Physical Address / local capacity) in the device.

cxl_memdev_get_numa_node() returns the affinitized CPU node number if
available or -1 otherwise.
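
A sketch of reading these attributes for each memdev, building on the
enumeration loop in the DESCRIPTION above:

    /* assumes <stdio.h> and <cxl/libcxl.h>, as in the earlier example */
    static void dump_memdev(struct cxl_memdev *memdev)
    {
            printf("%s: id=%d serial=%#llx\n",
                   cxl_memdev_get_devname(memdev),
                   cxl_memdev_get_id(memdev),
                   cxl_memdev_get_serial(memdev));
            printf("  pmem=%llu ram=%llu numa_node=%d\n",
                   cxl_memdev_get_pmem_size(memdev),
                   cxl_memdev_get_ram_size(memdev),
                   cxl_memdev_get_numa_node(memdev));
            printf("  firmware=%s label_size=%zu\n",
                   cxl_memdev_get_firmware_version(memdev),
                   cxl_memdev_get_label_size(memdev));
    }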

MEMDEV: Control
int cxl_memdev_disable_invalidate(struct cxl_memdev *memdev);
int cxl_memdev_enable(struct cxl_memdev *memdev);

When a memory device is disabled it unregisters its associated
endpoints and potentially intervening switch ports if there are no
other memdevs pinning that port active. That means that any existing
port objects that the library has previously returned are invalid and
need to be re-retrieved after cxl_memdev_disable_invalidate(). Any
usage of a previously obtained port object after a
cxl_memdev_disable_invalidate() call is a use-after-free programming
error. It follows that after cxl_memdev_enable() new ports may appear
in the topology that were not previously enumerable.

Note
cxl_memdev_disable_invalidate() will force disable the memdev
regardless of whether the memory provided by the device is in
active use by the operating system. Callers take responsibility for
ensuring that it is safe to disable the memory device. Otherwise,
this call can be as destructive as ripping a DIMM out of a running
system. Like all other libcxl calls that mutate the system state or
divulge security sensitive information this call requires root /
CAP_SYS_ADMIN.
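
A sketch of a disable / re-enable cycle, only appropriate when the
device's memory is known to be offline (requires root / CAP_SYS_ADMIN):

    /* DANGER: force-disables the device even if its memory is in use */
    static int cycle_memdev(struct cxl_memdev *memdev)
    {
            int rc;

            rc = cxl_memdev_disable_invalidate(memdev);
            if (rc)
                    return rc;

            /* all previously obtained port objects are now invalid */

            return cxl_memdev_enable(memdev);
    }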

MEMDEV: Commands
struct cxl_cmd *cxl_cmd_new_raw(struct cxl_memdev *memdev, int opcode);
struct cxl_cmd *cxl_cmd_new_identify(struct cxl_memdev *memdev);
struct cxl_cmd *cxl_cmd_new_get_health_info(struct cxl_memdev *memdev);
struct cxl_cmd *cxl_cmd_new_get_alert_config(struct cxl_memdev *memdev);
struct cxl_cmd *cxl_cmd_new_read_label(struct cxl_memdev *memdev,
        unsigned int offset, unsigned int length);
struct cxl_cmd *cxl_cmd_new_write_label(struct cxl_memdev *memdev, void *buf,
        unsigned int offset, unsigned int length);
int cxl_memdev_zero_label(struct cxl_memdev *memdev, size_t length,
        size_t offset);
int cxl_memdev_read_label(struct cxl_memdev *memdev, void *buf, size_t length,
        size_t offset);
int cxl_memdev_write_label(struct cxl_memdev *memdev, void *buf, size_t length,
        size_t offset);
struct cxl_cmd *cxl_cmd_new_get_partition(struct cxl_memdev *memdev);
struct cxl_cmd *cxl_cmd_new_set_partition(struct cxl_memdev *memdev,
        unsigned long long volatile_size);

A cxl_cmd is a reference counted object which is used to perform
Mailbox commands as described in the CXL Specification. A cxl_cmd
object is tied to a cxl_memdev. Associated library interfaces have the
prefix cxl_cmd_. Within this sub-class of interfaces, there are (see
the sketch after this list for the typical flow):

•   cxl_cmd_new_*() interfaces that allocate a new cxl_cmd object for a
    given command type targeted at a given memdev. As part of the
    command instantiation process the library validates that the
    command is supported by the memory device, otherwise it returns
    NULL to indicate no support. The libcxl command id is translated by
    the kernel into a CXL standard opcode. See the potential command
    ids in /usr/include/linux/cxl_mem.h.

•   cxl_cmd_<name>_set_<field> interfaces that set specific fields in a
    cxl_cmd

•   cxl_cmd_submit which submits the command via ioctl()

•   cxl_cmd_<name>_get_<field> interfaces that get specific fields out
    of the command response

•   cxl_cmd_get_* interfaces to get general command related
    information.

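A sketch of the allocate / submit / release flow, using the health-info
command as an example; cxl_cmd_get_mbox_status() and cxl_cmd_unref() are
assumed to be the library's status accessor and reference-count release
for the reference counted cxl_cmd described above, and <errno.h> is
assumed for ENOTSUP:

    static int check_health(struct cxl_memdev *memdev)
    {
            struct cxl_cmd *cmd;
            int rc;

            /* returns NULL if the device does not support the command */
            cmd = cxl_cmd_new_get_health_info(memdev);
            if (!cmd)
                    return -ENOTSUP;

            /* submit via the memdev's ioctl() interface */
            rc = cxl_cmd_submit(cmd);
            if (rc == 0)
                    rc = cxl_cmd_get_mbox_status(cmd);

            /*
             * on success, cxl_cmd_<name>_get_<field> style accessors
             * would be used here to parse the response payload
             */
            cxl_cmd_unref(cmd);
            return rc;
    }
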
cxl_cmd_new_raw() supports so-called RAW commands, where the command id
is RAW and it carries an unmodified CXL memory device command payload
associated with the opcode argument. Because the kernel does minimal
input validation on these commands, raw commands are typically not
supported by the kernel outside debug build scenarios. libcxl is
limited to supporting commands that appear in the CXL standard / public
specifications.

cxl_memdev_{read,write,zero}_label() are helpers for marshaling multiple
label access commands over an arbitrary extent of the device’s label
area.
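
For example, a sketch that reads the entire label area using the helper
together with the label-size attribute:

    #include <stdlib.h>

    static void *read_labels(struct cxl_memdev *memdev)
    {
            size_t size = cxl_memdev_get_label_size(memdev);
            void *buf = calloc(1, size);

            if (!buf)
                    return NULL;

            /* the helper splits this into one or more label access commands */
            if (cxl_memdev_read_label(memdev, buf, size, 0) < 0) {
                    free(buf);
                    return NULL;
            }
            return buf;
    }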

cxl_cmd_partition_set_mode() supports selecting NEXTBOOT or IMMEDIATE
mode. When CXL_SETPART_IMMEDIATE mode is set, it is the caller’s
responsibility to avoid immediate changes to partitioning when the
device is in use. When CXL_SETPART_NEXTBOOT mode is set, the change in
partitioning shall become the “next” configuration, to become active on
the next device reset.
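
A sketch of staging a partition change for the next device reset; the
exact cxl_cmd_partition_set_mode() signature and the CXL_SETPART_NEXTBOOT
constant are assumed from the description above, and <errno.h> is assumed
for ENOTSUP:

    static int stage_partition(struct cxl_memdev *memdev,
                               unsigned long long volatile_size)
    {
            struct cxl_cmd *cmd;
            int rc;

            cmd = cxl_cmd_new_set_partition(memdev, volatile_size);
            if (!cmd)
                    return -ENOTSUP;

            /* defer the new partitioning until the next device reset */
            rc = cxl_cmd_partition_set_mode(cmd, CXL_SETPART_NEXTBOOT);
            if (rc == 0)
                    rc = cxl_cmd_submit(cmd);

            cxl_cmd_unref(cmd);
            return rc;
    }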

BUSES
The CXL Memory space is CPU and Device coherent. The address ranges
that support coherent access are described by platform firmware and
communicated to the operating system via a CXL root object struct
cxl_bus.

BUS: Enumeration
struct cxl_bus *cxl_bus_get_first(struct cxl_ctx *ctx);
struct cxl_bus *cxl_bus_get_next(struct cxl_bus *bus);
struct cxl_ctx *cxl_bus_get_ctx(struct cxl_bus *bus);
struct cxl_bus *cxl_memdev_get_bus(struct cxl_memdev *memdev);
struct cxl_bus *cxl_port_get_bus(struct cxl_port *port);
struct cxl_bus *cxl_endpoint_get_bus(struct cxl_endpoint *endpoint);

#define cxl_bus_foreach(ctx, bus) \
        for (bus = cxl_bus_get_first(ctx); bus != NULL; \
             bus = cxl_bus_get_next(bus))

When a memdev is active it has established a CXL port hierarchy between
itself and the root of its associated CXL topology. The
cxl_{memdev,endpoint}_get_bus() helpers walk that topology to retrieve
the associated bus object.

BUS: Attributes
const char *cxl_bus_get_provider(struct cxl_bus *bus);
const char *cxl_bus_get_devname(struct cxl_bus *bus);
int cxl_bus_get_id(struct cxl_bus *bus);

The provider name of a bus is a persistent name that is independent of
discovery order. The possible provider names are ACPI.CXL and cxl_test.
The devname and id attributes, like those of other objects, are just
the kernel device names that are subject to change based on discovery
order.
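
A sketch that reports which bus a memdev is attached to:

    static void report_bus(struct cxl_memdev *memdev)
    {
            struct cxl_bus *bus = cxl_memdev_get_bus(memdev);

            if (!bus) {
                    /* memdev is not attached to an enabled port hierarchy */
                    printf("%s: no bus\n", cxl_memdev_get_devname(memdev));
                    return;
            }
            printf("%s: bus %s (provider %s)\n",
                   cxl_memdev_get_devname(memdev),
                   cxl_bus_get_devname(bus), cxl_bus_get_provider(bus));
    }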

BUS: Control
int cxl_bus_disable_invalidate(struct cxl_bus *bus);

An entire CXL topology can be torn down with this API. Like other
_invalidate APIs, callers must assume that all library objects have
been freed. This one goes one step further and also frees the @bus
argument. This may crash the system and is only useful in kernel driver
development scenarios.

PORTS
CXL ports track the PCIe hierarchy between a platform firmware CXL root
object, through CXL / PCIe Host Bridges, CXL / PCIe Root Ports, and CXL
/ PCIe Switch Ports.

PORT: Enumeration
struct cxl_port *cxl_bus_get_port(struct cxl_bus *bus);
struct cxl_port *cxl_port_get_first(struct cxl_port *parent);
struct cxl_port *cxl_port_get_next(struct cxl_port *port);
struct cxl_port *cxl_port_get_parent(struct cxl_port *port);
struct cxl_ctx *cxl_port_get_ctx(struct cxl_port *port);
const char *cxl_port_get_host(struct cxl_port *port);
struct cxl_port *cxl_decoder_get_port(struct cxl_decoder *decoder);
struct cxl_port *cxl_port_get_next_all(struct cxl_port *port,
        const struct cxl_port *top);
struct cxl_port *cxl_dport_get_port(struct cxl_dport *dport);

#define cxl_port_foreach(parent, port) \
        for (port = cxl_port_get_first(parent); port != NULL; \
             port = cxl_port_get_next(port))

#define cxl_port_foreach_all(top, port) \
        for (port = cxl_port_get_first(top); port != NULL; \
             port = cxl_port_get_next_all(port, top))

A bus object encapsulates a CXL port object. Use cxl_bus_get_port() to
use generic port APIs on root objects.

Ports are hierarchical. All but the root object have another CXL port
as a parent object retrievable via cxl_port_get_parent().

The root port of a hierarchy can be retrieved via any port instance in
that hierarchy via cxl_port_get_bus().

The host of a port is the corresponding device name of the PCIe Root
Port, or Switch Upstream Port with CXL capabilities.

The cxl_port_foreach_all() helper does a depth first iteration of all
ports beneath the top port argument.
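
A sketch that walks every port beneath a bus, depth first, and prints
its type and depth, using the attribute helpers listed in the PORT:
Attributes subsection below:

    static void walk_ports(struct cxl_bus *bus)
    {
            struct cxl_port *top = cxl_bus_get_port(bus);
            struct cxl_port *port;

            cxl_port_foreach_all(top, port) {
                    const char *type =
                            cxl_port_is_endpoint(port) ? "endpoint" :
                            cxl_port_is_switch(port) ? "switch" :
                            cxl_port_is_root(port) ? "root" : "port";

                    /* indent two spaces per level of depth */
                    printf("%*s%s (%s, host %s)%s\n",
                           cxl_port_get_depth(port) * 2, "",
                           cxl_port_get_devname(port), type,
                           cxl_port_get_host(port),
                           cxl_port_is_enabled(port) ? "" : " [disabled]");
            }
    }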

PORT: Control
int cxl_port_disable_invalidate(struct cxl_port *port);
int cxl_port_enable(struct cxl_port *port);

cxl_port_disable_invalidate() is a violent operation that disables an
entire sub-tree of CXL Memory Devices and Ports; only use it in test /
debug scenarios, or after ensuring that all impacted devices are
deactivated first.

PORT: Attributes
const char *cxl_port_get_devname(struct cxl_port *port);
int cxl_port_get_id(struct cxl_port *port);
int cxl_port_is_enabled(struct cxl_port *port);
bool cxl_port_is_root(struct cxl_port *port);
bool cxl_port_is_switch(struct cxl_port *port);
bool cxl_port_is_endpoint(struct cxl_port *port);
int cxl_port_get_depth(struct cxl_port *port);
bool cxl_port_hosts_memdev(struct cxl_port *port, struct cxl_memdev *memdev);
int cxl_port_get_nr_dports(struct cxl_port *port);

The port type is communicated via cxl_port_is_<type>(). An enabled port
is one that has succeeded in discovering the CXL component registers in
the host device and has enumerated its downstream ports. In order for a
memdev to be enabled for CXL memory operation, all CXL ports in its
ancestry must also be enabled, including a root port, an arbitrary
number of intervening switch ports, and a terminal endpoint port.

cxl_port_hosts_memdev() returns true if the port’s host appears in the
memdev host’s device topology ancestry.

DPORTS
A CXL dport object represents a CXL / PCIe Switch Downstream Port,
or a CXL / PCIe host bridge.

struct cxl_dport *cxl_dport_get_first(struct cxl_port *port);
struct cxl_dport *cxl_dport_get_next(struct cxl_dport *dport);
struct cxl_dport *cxl_port_get_dport_by_memdev(struct cxl_port *port,
        struct cxl_memdev *memdev);

#define cxl_dport_foreach(port, dport) \
        for (dport = cxl_dport_get_first(port); dport != NULL; \
             dport = cxl_dport_get_next(dport))

const char *cxl_dport_get_devname(struct cxl_dport *dport);
const char *cxl_dport_get_physical_node(struct cxl_dport *dport);
int cxl_dport_get_id(struct cxl_dport *dport);
bool cxl_dport_maps_memdev(struct cxl_dport *dport, struct cxl_memdev *memdev);

The id of a dport is the hardware identifier used by an upstream
port to reference a downstream port. The physical node of a dport
is only available for platform firmware defined downstream ports
and aliases the companion object, like a PCI host bridge, in the
PCI device hierarchy.

The cxl_dport_maps_memdev() helper checks if a dport is an ancestor
of a given memdev.
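
A sketch that lists a port's downstream ports and flags the one that
leads to a given memdev:

    static void list_dports(struct cxl_port *port, struct cxl_memdev *memdev)
    {
            struct cxl_dport *dport;

            cxl_dport_foreach(port, dport)
                    printf("dport %d: %s%s\n", cxl_dport_get_id(dport),
                           cxl_dport_get_devname(dport),
                           cxl_dport_maps_memdev(dport, memdev) ?
                           " (maps memdev)" : "");
    }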

ENDPOINTS
CXL endpoint objects encapsulate the set of host-managed device-memory
(HDM) decoders in a physical memory device. The endpoint is the last
hop in a decoder chain that translates SPA to DPA
(system-physical-address to device-local-physical-address).

ENDPOINT: Enumeration
struct cxl_endpoint *cxl_endpoint_get_first(struct cxl_port *parent);
struct cxl_endpoint *cxl_endpoint_get_next(struct cxl_endpoint *endpoint);
struct cxl_ctx *cxl_endpoint_get_ctx(struct cxl_endpoint *endpoint);
struct cxl_port *cxl_endpoint_get_parent(struct cxl_endpoint *endpoint);
struct cxl_port *cxl_endpoint_get_port(struct cxl_endpoint *endpoint);
const char *cxl_endpoint_get_host(struct cxl_endpoint *endpoint);
struct cxl_endpoint *cxl_memdev_get_endpoint(struct cxl_memdev *memdev);
struct cxl_endpoint *cxl_port_to_endpoint(struct cxl_port *port);

#define cxl_endpoint_foreach(port, endpoint) \
        for (endpoint = cxl_endpoint_get_first(port); endpoint != NULL; \
             endpoint = cxl_endpoint_get_next(endpoint))

ENDPOINT: Attributes
const char *cxl_endpoint_get_devname(struct cxl_endpoint *endpoint);
int cxl_endpoint_get_id(struct cxl_endpoint *endpoint);
int cxl_endpoint_is_enabled(struct cxl_endpoint *endpoint);
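
A sketch that resolves a memdev to its endpoint and reports its state:

    static void report_endpoint(struct cxl_memdev *memdev)
    {
            struct cxl_endpoint *ep = cxl_memdev_get_endpoint(memdev);

            if (!ep) {
                    printf("%s: no endpoint (memdev disabled?)\n",
                           cxl_memdev_get_devname(memdev));
                    return;
            }
            printf("%s: endpoint %s host %s enabled %d\n",
                   cxl_memdev_get_devname(memdev),
                   cxl_endpoint_get_devname(ep), cxl_endpoint_get_host(ep),
                   cxl_endpoint_is_enabled(ep));
    }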

DECODERS
Decoder objects are associated with the "HDM Decoder Capability"
published in Port devices and CXL capable PCIe endpoints. The kernel
additionally models platform firmware described CXL memory ranges (like
the ACPI CEDT.CFMWS) as static decoder objects. They route System
Physical Addresses through a port topology to an endpoint decoder that
does the final translation from SPA to DPA (system-physical-address to
device-local-physical-address).

DECODER: Enumeration
struct cxl_decoder *cxl_decoder_get_first(struct cxl_port *port);
struct cxl_decoder *cxl_decoder_get_next(struct cxl_decoder *decoder);
struct cxl_ctx *cxl_decoder_get_ctx(struct cxl_decoder *decoder);
struct cxl_decoder *cxl_target_get_decoder(struct cxl_target *target);

#define cxl_decoder_foreach(port, decoder) \
        for (decoder = cxl_decoder_get_first(port); decoder != NULL; \
             decoder = cxl_decoder_get_next(decoder))

The definition of a CXL port in libcxl is an object that hosts one or
more CXL decoder objects.

DECODER: Attributes
unsigned long long cxl_decoder_get_resource(struct cxl_decoder *decoder);
unsigned long long cxl_decoder_get_size(struct cxl_decoder *decoder);
unsigned long long cxl_decoder_get_dpa_resource(struct cxl_decoder *decoder);
unsigned long long cxl_decoder_get_dpa_size(struct cxl_decoder *decoder);
int cxl_decoder_set_dpa_size(struct cxl_decoder *decoder, unsigned long long size);
const char *cxl_decoder_get_devname(struct cxl_decoder *decoder);
int cxl_decoder_get_id(struct cxl_decoder *decoder);
int cxl_decoder_get_nr_targets(struct cxl_decoder *decoder);
struct cxl_region *cxl_decoder_get_region(struct cxl_decoder *decoder);

enum cxl_decoder_target_type {
        CXL_DECODER_TTYPE_UNKNOWN,
        CXL_DECODER_TTYPE_EXPANDER,
        CXL_DECODER_TTYPE_ACCELERATOR,
};

enum cxl_decoder_target_type
cxl_decoder_get_target_type(struct cxl_decoder *decoder);

enum cxl_decoder_mode {
        CXL_DECODER_MODE_NONE,
        CXL_DECODER_MODE_MIXED,
        CXL_DECODER_MODE_PMEM,
        CXL_DECODER_MODE_RAM,
};
enum cxl_decoder_mode cxl_decoder_get_mode(struct cxl_decoder *decoder);
int cxl_decoder_set_mode(struct cxl_decoder *decoder, enum cxl_decoder_mode mode);

bool cxl_decoder_is_pmem_capable(struct cxl_decoder *decoder);
bool cxl_decoder_is_volatile_capable(struct cxl_decoder *decoder);
bool cxl_decoder_is_mem_capable(struct cxl_decoder *decoder);
bool cxl_decoder_is_accelmem_capable(struct cxl_decoder *decoder);
bool cxl_decoder_is_locked(struct cxl_decoder *decoder);

The kernel protects the enumeration of the physical address layout of
the system. Without CAP_SYS_ADMIN, cxl_decoder_get_resource() returns
ULLONG_MAX to indicate that the address information was not
retrievable. Otherwise, cxl_decoder_get_resource() returns the
currently programmed value of the base of the decoder’s decode range. A
zero-sized decoder indicates a disabled decoder.
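
A sketch that dumps the decode ranges published by a port, accounting
for the CAP_SYS_ADMIN restriction:

    #include <limits.h>

    static void dump_decoders(struct cxl_port *port)
    {
            struct cxl_decoder *decoder;

            cxl_decoder_foreach(port, decoder) {
                    unsigned long long base =
                            cxl_decoder_get_resource(decoder);
                    unsigned long long size = cxl_decoder_get_size(decoder);

                    if (base == ULLONG_MAX) {
                            /* address layout hidden without CAP_SYS_ADMIN */
                            printf("%s: <restricted>\n",
                                   cxl_decoder_get_devname(decoder));
                            continue;
                    }
                    printf("%s: %#llx + %#llx%s\n",
                           cxl_decoder_get_devname(decoder), base, size,
                           size ? "" : " (disabled)");
            }
    }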

Root level decoders only support a limited set of memory types in their
address range. The cxl_decoder_is_<memtype>_capable() helpers identify
what is supported. Switch level decoders, in contrast, are capable of
routing any memory type, i.e. they just forward along the memory type
support from their parent port. Endpoint decoders follow the
capabilities of their host memory device.

The capabilities of a decoder are not to be confused with their type /
mode. The type ultimately depends on the endpoint. For example an
accelerator requires all decoders in its ancestry to be set to
CXL_DECODER_TTYPE_ACCELERATOR, and conversely plain memory expander
devices require CXL_DECODER_TTYPE_EXPANDER.

Platform firmware may set up the CXL decode hierarchy before the OS
boots, and may additionally require that the OS not change the decode
settings. This property is indicated by the cxl_decoder_is_locked()
API.

When a decoder is associated with a region cxl_decoder_get_region()
returns that region object. Note that it is only applicable to switch
and endpoint decoders, as root decoders have a 1:N relationship with
regions. Use cxl_region_foreach() for the equivalent functionality for
root decoders.

TARGETS
A root or switch level decoder takes an SPA
(system-physical-address) as input and routes it to a downstream
port. Which downstream port depends on the downstream port’s
position in the interleave. A struct cxl_target object represents
the properties of a given downstream port relative to its
interleave configuration.

struct cxl_target *cxl_decoder_get_target_by_memdev(struct cxl_decoder *decoder,
        struct cxl_memdev *memdev);
struct cxl_target *
cxl_decoder_get_target_by_position(struct cxl_decoder *decoder, int position);
struct cxl_target *cxl_target_get_first(struct cxl_decoder *decoder);
struct cxl_target *cxl_target_get_next(struct cxl_target *target);

#define cxl_target_foreach(decoder, target) \
        for (target = cxl_target_get_first(decoder); target != NULL; \
             target = cxl_target_get_next(target))

For switch decoders, target objects can only be enumerated once the
decoder has been configured. For root decoders they are always
available since the root decoder target mapping is static. The
cxl_decoder_get_target_by_memdev() helper walks the topology to
validate if the given memory device is capable of receiving cycles
from this upstream decoder. It does not validate if the memory
device is currently configured to participate in that decode.
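
A sketch that prints a decoder's interleave targets and checks whether
a given memdev is reachable through any of them, using the target
attribute helpers listed just below:

    static void dump_targets(struct cxl_decoder *decoder,
                             struct cxl_memdev *memdev)
    {
            struct cxl_target *target;

            cxl_target_foreach(decoder, target)
                    printf("position %d: %s (id %lu)%s\n",
                           cxl_target_get_position(target),
                           cxl_target_get_devname(target),
                           cxl_target_get_id(target),
                           cxl_target_maps_memdev(target, memdev) ?
                           " -> maps memdev" : "");

            if (cxl_decoder_get_target_by_memdev(decoder, memdev))
                    printf("%s is reachable from %s\n",
                           cxl_memdev_get_devname(memdev),
                           cxl_decoder_get_devname(decoder));
    }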

int cxl_target_get_position(struct cxl_target *target);
unsigned long cxl_target_get_id(struct cxl_target *target);
const char *cxl_target_get_devname(struct cxl_target *target);
bool cxl_target_maps_memdev(struct cxl_target *target,
        struct cxl_memdev *memdev);
const char *cxl_target_get_physical_node(struct cxl_target *target);

The position of a target, along with the interleave granularity,
dictates which addresses in the decoder’s resource range map to
which port.

The target id is an identifier that the CXL port uses to reference
this downstream port. For CXL / PCIe downstream switch ports the id
is defined by the PCIe Link Capability Port Number field. For root
decoders the id is specified by a platform firmware specific
mechanism. For ACPI.CXL defined root ports the id comes from the
CEDT.CHBS / ACPI0016 _UID.

The device name of a target is the name of the host device for the
downstream port. For CXL / PCIe downstream ports the devname is the
downstream switch port’s PCI device. For CXL root ports the devname
is a platform firmware object for the host bridge, like an ACPI0016
device instance.

The cxl_target_maps_memdev() helper is the companion of
cxl_decoder_get_target_by_memdev() to determine which downstream
ports / targets are capable of mapping which memdevs.

Some platform firmware implementations define an alias / companion
device to represent the root of a PCI device hierarchy. The
cxl_target_get_physical_node() helper returns the device name of
that companion object in the PCI hierarchy.

REGIONS
A CXL region is composed of one or more slices of CXL memdevs, with
configurable interleave settings - both the number of interleave
ways, and the interleave granularity. In terms of hierarchy, it is
the child of a CXL root decoder. A root decoder (recall that this
corresponds to an ACPI CEDT.CFMWS window) may have multiple child
regions, but a region is strictly tied to one root decoder.

The slices that compose a region are called mappings. A mapping is
a tuple of a memdev, an endpoint decoder, and a position.

struct cxl_region *cxl_region_get_first(struct cxl_decoder *decoder);
struct cxl_region *cxl_region_get_next(struct cxl_region *region);

#define cxl_region_foreach(decoder, region) \
        for (region = cxl_region_get_first(decoder); region != NULL; \
             region = cxl_region_get_next(region))

#define cxl_region_foreach_safe(decoder, region, _region) \
        for (region = cxl_region_get_first(decoder), \
             _region = region ? cxl_region_get_next(region) : NULL; \
             region != NULL; \
             region = _region, \
             _region = _region ? cxl_region_get_next(_region) : NULL)

int cxl_region_get_id(struct cxl_region *region);
const char *cxl_region_get_devname(struct cxl_region *region);
void cxl_region_get_uuid(struct cxl_region *region, uuid_t uu);
unsigned long long cxl_region_get_size(struct cxl_region *region);
unsigned long long cxl_region_get_resource(struct cxl_region *region);
unsigned int cxl_region_get_interleave_ways(struct cxl_region *region);
unsigned int cxl_region_get_interleave_granularity(struct cxl_region *region);
struct cxl_decoder *cxl_region_get_target_decoder(struct cxl_region *region,
        int position);
int cxl_region_set_size(struct cxl_region *region, unsigned long long size);
int cxl_region_set_uuid(struct cxl_region *region, uuid_t uu);
int cxl_region_set_interleave_ways(struct cxl_region *region,
        unsigned int ways);
int cxl_region_set_interleave_granularity(struct cxl_region *region,
        unsigned int granularity);
int cxl_region_set_target(struct cxl_region *region, int position,
        struct cxl_decoder *decoder);
int cxl_region_clear_target(struct cxl_region *region, int position);
int cxl_region_clear_all_targets(struct cxl_region *region);
int cxl_region_decode_commit(struct cxl_region *region);
int cxl_region_decode_reset(struct cxl_region *region);

A region’s resource attribute is the Host Physical Address at which
the region’s address space starts. The region’s address space is a
subset of the parent root decoder’s address space.

The interleave ways setting is the number of component memdevs
participating in the region.

The interleave granularity depends on the root decoder’s
granularity, and must follow the interleave math rules defined in
the CXL spec.

Regions have a list of targets 0..N, which are programmed with the
name of an endpoint decoder under each participating memdev.

The decode_commit and decode_reset attributes reserve and free DPA
space on a given memdev by allocating an endpoint decoder, and
programming it based on the region’s interleave geometry.
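
A sketch of configuring a 2-way region, given an existing region object
(obtained for example via cxl_region_foreach() on a root decoder) and
two endpoint decoders; uuid_generate() is assumed from libuuid, the size
and granularity values are illustrative only, and the ordering shown is
one plausible sequence:

    #include <uuid/uuid.h>

    static int setup_region(struct cxl_region *region,
                            struct cxl_decoder *ep0, struct cxl_decoder *ep1)
    {
            uuid_t uuid;
            int rc;

            uuid_generate(uuid);

            /* geometry first: ways, granularity, uuid, then total size */
            rc = cxl_region_set_interleave_ways(region, 2);
            if (rc == 0)
                    rc = cxl_region_set_interleave_granularity(region, 4096);
            if (rc == 0)
                    rc = cxl_region_set_uuid(region, uuid);
            if (rc == 0)
                    rc = cxl_region_set_size(region, 2ULL << 30);

            /* one endpoint decoder per interleave position */
            if (rc == 0)
                    rc = cxl_region_set_target(region, 0, ep0);
            if (rc == 0)
                    rc = cxl_region_set_target(region, 1, ep1);

            /* program the decode hierarchy and reserve DPA */
            if (rc == 0)
                    rc = cxl_region_decode_commit(region);

            return rc;
    }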

COPYRIGHT
Copyright © 2016 - 2022, Intel Corporation. License GPLv2: GNU GPL
version 2 http://gnu.org/licenses/gpl.html. This is free software: you
are free to change and redistribute it. There is NO WARRANTY, to the
extent permitted by law.

SEE ALSO
cxl(1)



                                  01/13/2023                       LIBCXL(3)