membarrier(2)                 System Calls Manual                membarrier(2)

NAME

       membarrier - issue memory barriers on a set of threads

LIBRARY

       Standard C library (libc, -lc)

SYNOPSIS

       #include <linux/membarrier.h> /* Definition of MEMBARRIER_* constants */
       #include <sys/syscall.h>      /* Definition of SYS_* constants */
       #include <unistd.h>

       int syscall(SYS_membarrier, int cmd, unsigned int flags, int cpu_id);

       Note: glibc provides no wrapper for membarrier(), necessitating the
       use of syscall(2).
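
       As an illustrative sketch (the wrapper below is an example convention
       of this page, not a glibc interface), membarrier() can be invoked via
       syscall(2) as follows:

           #include <linux/membarrier.h>
           #include <sys/syscall.h>
           #include <unistd.h>

           /* Example convenience wrapper around the raw system call. */
           static int
           membarrier(int cmd, unsigned int flags, int cpu_id)
           {
               return syscall(SYS_membarrier, cmd, flags, cpu_id);
           }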

DESCRIPTION

       The membarrier() system call helps reduce the overhead of the memory
       barrier instructions required to order memory accesses on multi-core
       systems.  However, this system call is heavier than a memory barrier,
       so using it effectively is not as simple as replacing memory barriers
       with this system call, but requires understanding of the details
       below.

       Use of memory barriers needs to be done taking into account that a
       memory barrier always needs to be either matched with its memory
       barrier counterparts, or that the architecture's memory model doesn't
       require the matching barriers.

       There are cases where one side of the matching barriers (which we
       will refer to as "fast side") is executed much more often than the
       other (which we will refer to as "slow side").  This is a prime
       target for the use of membarrier().  The key idea is to replace, for
       these matching barriers, the fast-side memory barriers by simple
       compiler barriers, for example:

           asm volatile ("" : : : "memory")

       and replace the slow-side memory barriers by calls to membarrier().

       This will add overhead to the slow side, and remove overhead from the
       fast side, thus resulting in an overall performance increase as long
       as the slow side is infrequent enough that the overhead of the
       membarrier() calls does not outweigh the performance gain on the fast
       side.

       The cmd argument is one of the following:

       MEMBARRIER_CMD_QUERY (since Linux 4.3)
              Query the set of supported commands.  The return value of the
              call is a bit mask of supported commands.
              MEMBARRIER_CMD_QUERY, which has the value 0, is not itself
              included in this bit mask.  This command is always supported
              (on kernels where membarrier() is provided).

       MEMBARRIER_CMD_GLOBAL (since Linux 4.16)
              Ensure that all threads from all processes on the system pass
              through a state where all memory accesses to user-space
              addresses match program order between entry to and return from
              the membarrier() system call.  All threads on the system are
              targeted by this command.

       MEMBARRIER_CMD_GLOBAL_EXPEDITED (since Linux 4.16)
              Execute a memory barrier on all running threads of all
              processes that previously registered with
              MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED.

              Upon return from the system call, the calling thread has a
              guarantee that all running threads have passed through a state
              where all memory accesses to user-space addresses match
              program order between entry to and return from the system call
              (non-running threads are de facto in such a state).  This
              guarantee is provided only for the threads of processes that
              previously registered with
              MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED.

              Given that registration is about the intent to receive the
              barriers, it is valid to invoke MEMBARRIER_CMD_GLOBAL_EXPEDITED
              from a process that has not employed
              MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED.

              The "expedited" commands complete faster than the
              non-expedited ones; they never block, but have the downside of
              causing extra overhead.

       MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED (since Linux 4.16)
              Register the process's intent to receive
              MEMBARRIER_CMD_GLOBAL_EXPEDITED memory barriers.

       MEMBARRIER_CMD_PRIVATE_EXPEDITED (since Linux 4.14)
              Execute a memory barrier on each running thread belonging to
              the same process as the calling thread.

              Upon return from the system call, the calling thread has a
              guarantee that all its running thread siblings have passed
              through a state where all memory accesses to user-space
              addresses match program order between entry to and return from
              the system call (non-running threads are de facto in such a
              state).  This guarantee is provided only for threads in the
              same process as the calling thread.

              The "expedited" commands complete faster than the
              non-expedited ones; they never block, but have the downside of
              causing extra overhead.

              A process must register its intent to use the private
              expedited command prior to using it; an illustrative sketch of
              this register-then-use pattern follows this list of commands.

       MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED (since Linux 4.14)
              Register the process's intent to use
              MEMBARRIER_CMD_PRIVATE_EXPEDITED.

       MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE (since Linux 4.16)
              In addition to providing the memory ordering guarantees
              described in MEMBARRIER_CMD_PRIVATE_EXPEDITED, upon return
              from the system call the calling thread has a guarantee that
              all its running thread siblings have executed a core
              serializing instruction.  This guarantee is provided only for
              threads in the same process as the calling thread.

              The "expedited" commands complete faster than the
              non-expedited ones; they never block, but have the downside of
              causing extra overhead.

              A process must register its intent to use the private
              expedited sync core command prior to using it.

       MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE (since Linux 4.16)
              Register the process's intent to use
              MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE.

       MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ (since Linux 5.10)
              Ensure that, upon return from the system call, all running
              thread siblings of the calling thread have any currently
              running rseq critical sections restarted if the flags
              parameter is 0; if flags is MEMBARRIER_CMD_FLAG_CPU, then this
              operation is performed only on the CPU indicated by cpu_id.
              This guarantee is provided only for threads in the same
              process as the calling thread.

              RSEQ membarrier is only available in the "private expedited"
              form.

              A process must register its intent to use the private
              expedited rseq command prior to using it.

       MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ (since Linux 5.10)
              Register the process's intent to use
              MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ.

       MEMBARRIER_CMD_SHARED (since Linux 4.3)
              This is an alias for MEMBARRIER_CMD_GLOBAL that exists for
              header backward compatibility.
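
       The following illustrative sketch shows the register-then-use pattern
       for the private expedited command.  It is a fragment, not a complete
       program: it assumes the syscall(2) wrapper shown in SYNOPSIS and
       <stdio.h> for perror(3), and its error handling is abbreviated:

           /* Once, early in the process's lifetime: register the intent
              to use the private expedited command. */
           if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED,
                          0, 0) < 0)
               perror("membarrier register");

           /* Later, on the slow side: issue a memory barrier on every
              running thread of this process. */
           if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0) < 0)
               perror("membarrier");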

       The flags argument must be specified as 0 unless the command is
       MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, in which case flags can be
       either 0 or MEMBARRIER_CMD_FLAG_CPU.

       The cpu_id argument is ignored unless flags is
       MEMBARRIER_CMD_FLAG_CPU, in which case it must specify the CPU
       targeted by this membarrier command.
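
       For example, the following illustrative fragment targets a single CPU
       with the rseq command.  It assumes the wrapper shown in SYNOPSIS,
       that the process has already registered with
       MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, and that cpu is a
       hypothetical variable holding a previously chosen CPU number:

           /* Restart any rseq critical section currently running on the
              CPU identified by cpu. */
           if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
                          MEMBARRIER_CMD_FLAG_CPU, cpu) < 0)
               perror("membarrier rseq");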

       All memory accesses performed in program order from each targeted
       thread are guaranteed to be ordered with respect to membarrier().

       If we use the semantic barrier() to represent a compiler barrier
       forcing memory accesses to be performed in program order across the
       barrier, and smp_mb() to represent explicit memory barriers forcing
       full memory ordering across the barrier, we have the following
       ordering table for each pairing of barrier(), membarrier(), and
       smp_mb().  The pair ordering is detailed as (O: ordered, X: not
       ordered):

                             barrier()   smp_mb()   membarrier()
              barrier()          X          X            O
              smp_mb()           X          O            O
              membarrier()       O          O            O

RETURN VALUE

       On success, the MEMBARRIER_CMD_QUERY operation returns a bit mask of
       supported commands, and the MEMBARRIER_CMD_GLOBAL,
       MEMBARRIER_CMD_GLOBAL_EXPEDITED,
       MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED,
       MEMBARRIER_CMD_PRIVATE_EXPEDITED,
       MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED,
       MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, and
       MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE operations return
       zero.  On error, -1 is returned, and errno is set to indicate the
       error.

       For a given command, with flags set to 0, this system call is
       guaranteed to always return the same value until reboot.  Further
       calls with the same arguments will lead to the same result.
       Therefore, with flags set to 0, error handling is required only for
       the first call to membarrier().

ERRORS

       EINVAL cmd is invalid, or flags is nonzero, or the
              MEMBARRIER_CMD_GLOBAL command is disabled because the
              nohz_full CPU parameter has been set, or the
              MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE and
              MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE commands
              are not implemented by the architecture.

       ENOSYS The membarrier() system call is not implemented by this
              kernel.

       EPERM  The current process was not registered prior to using private
              expedited commands.

STANDARDS

       Linux.

HISTORY

       Linux 4.3.

       Before Linux 5.10, the prototype was:

           int membarrier(int cmd, int flags);

NOTES

       A memory barrier instruction is part of the instruction set of
       architectures with weakly ordered memory models.  It orders memory
       accesses prior to the barrier and after the barrier with respect to
       matching barriers on other cores.  For instance, a load fence can
       order loads prior to and following that fence with respect to stores
       ordered by store fences.

       Program order is the order in which instructions are ordered in the
       program assembly code.

       Examples where membarrier() can be useful include implementations of
       Read-Copy-Update libraries and garbage collectors.

EXAMPLES

       Assuming a multithreaded application where "fast_path()" is executed
       very frequently, and where "slow_path()" is executed infrequently,
       the following code (x86) can be transformed using membarrier():

           #include <stdlib.h>

           static volatile int a, b;

           static void
           fast_path(int *read_b)
           {
               a = 1;
               asm volatile ("mfence" : : : "memory");
               *read_b = b;
           }

           static void
           slow_path(int *read_a)
           {
               b = 1;
               asm volatile ("mfence" : : : "memory");
               *read_a = a;
           }

           int
           main(void)
           {
               int read_a, read_b;

               /*
                * Real applications would call fast_path() and slow_path()
                * from different threads. Call those from main() to keep
                * this example short.
                */

               slow_path(&read_a);
               fast_path(&read_b);

               /*
                * read_b == 0 implies read_a == 1 and
                * read_a == 0 implies read_b == 1.
                */

               if (read_b == 0 && read_a == 0)
                   abort();

               exit(EXIT_SUCCESS);
           }
       The code above transformed to use membarrier() becomes:

           #define _GNU_SOURCE
           #include <stdlib.h>
           #include <stdio.h>
           #include <unistd.h>
           #include <sys/syscall.h>
           #include <linux/membarrier.h>

           static volatile int a, b;

           static int
           membarrier(int cmd, unsigned int flags, int cpu_id)
           {
               return syscall(__NR_membarrier, cmd, flags, cpu_id);
           }

           static int
           init_membarrier(void)
           {
               int ret;

               /* Check that membarrier() is supported. */

               ret = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);
               if (ret < 0) {
                   perror("membarrier");
                   return -1;
               }

               if (!(ret & MEMBARRIER_CMD_GLOBAL)) {
                   fprintf(stderr,
                       "membarrier does not support MEMBARRIER_CMD_GLOBAL\n");
                   return -1;
               }

               return 0;
           }

           static void
           fast_path(int *read_b)
           {
               a = 1;
               asm volatile ("" : : : "memory");
               *read_b = b;
           }

           static void
           slow_path(int *read_a)
           {
               b = 1;
               membarrier(MEMBARRIER_CMD_GLOBAL, 0, 0);
               *read_a = a;
           }

           int
           main(int argc, char *argv[])
           {
               int read_a, read_b;

               if (init_membarrier())
                   exit(EXIT_FAILURE);

               /*
                * Real applications would call fast_path() and slow_path()
                * from different threads. Call those from main() to keep
                * this example short.
                */

               slow_path(&read_a);
               fast_path(&read_b);

               /*
                * read_b == 0 implies read_a == 1 and
                * read_a == 0 implies read_b == 1.
                */

               if (read_b == 0 && read_a == 0)
                   abort();

               exit(EXIT_SUCCESS);
           }

Linux man-pages 6.05              2023-05-03                     membarrier(2)