#ifndef _LINUX_MEMBARRIER_H
#define _LINUX_MEMBARRIER_H

/*
 * linux/membarrier.h
 *
 * membarrier system call API
 *
 * Copyright (c) 2010, 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/**
 * enum membarrier_cmd - membarrier system call command
 * @MEMBARRIER_CMD_QUERY:   Query the set of supported commands. It returns
 *                          a bitmask of valid commands.
 * @MEMBARRIER_CMD_GLOBAL:  Execute a memory barrier on all running threads.
 *                          Upon return from the system call, the calling
 *                          thread is guaranteed that all running threads
 *                          have passed through a state where all memory
 *                          accesses to user-space addresses match program
 *                          order between entry to and return from the
 *                          system call (non-running threads are de facto
 *                          in such a state). This covers threads from all
 *                          processes running on the system. This command
 *                          returns 0.
 * @MEMBARRIER_CMD_GLOBAL_EXPEDITED:
 *                          Execute a memory barrier on all running threads
 *                          of all processes which previously registered
 *                          with MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED.
 *                          Upon return from the system call, the calling
 *                          thread is guaranteed that all running threads
 *                          have passed through a state where all memory
 *                          accesses to user-space addresses match program
 *                          order between entry to and return from the
 *                          system call (non-running threads are de facto
 *                          in such a state). This only covers threads from
 *                          processes which registered with
 *                          MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED.
 *                          This command returns 0. Given that registration
 *                          expresses the intent to receive the barriers,
 *                          it is valid to invoke
 *                          MEMBARRIER_CMD_GLOBAL_EXPEDITED from a
 *                          non-registered process.
 * @MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
 *                          Register the process's intent to receive
 *                          MEMBARRIER_CMD_GLOBAL_EXPEDITED memory
 *                          barriers. Always returns 0.
 * @MEMBARRIER_CMD_PRIVATE_EXPEDITED:
 *                          Execute a memory barrier on each running
 *                          thread belonging to the same process as the
 *                          current thread. Upon return from the system
 *                          call, the calling thread is guaranteed that
 *                          all of its running thread siblings have passed
 *                          through a state where all memory accesses to
 *                          user-space addresses match program order
 *                          between entry to and return from the system
 *                          call (non-running threads are de facto in such
 *                          a state). This only covers threads from the
 *                          same process as the calling thread. This
 *                          command returns 0 on success. The "expedited"
 *                          commands complete faster than the non-expedited
 *                          ones; they never block, but have the downside
 *                          of causing extra overhead. A process needs to
 *                          register its intent to use the private
 *                          expedited command before using it; otherwise
 *                          this command returns -EPERM.
 * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
 *                          Register the process's intent to use
 *                          MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always
 *                          returns 0.
 * @MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
 *                          In addition to providing the memory ordering
 *                          guarantees described in
 *                          MEMBARRIER_CMD_PRIVATE_EXPEDITED, ensure that,
 *                          upon return from the system call, all of the
 *                          calling thread's running thread siblings have
 *                          executed a core serializing instruction
 *                          (architectures are required to guarantee that
 *                          non-running threads issue core serializing
 *                          instructions before they resume user-space
 *                          execution). This only covers threads from the
 *                          same process as the calling thread. This
 *                          command returns 0 on success. The "expedited"
 *                          commands complete faster than the non-expedited
 *                          ones; they never block, but have the downside
 *                          of causing extra overhead. If this command is
 *                          not implemented by an architecture, -EINVAL is
 *                          returned. A process needs to register its
 *                          intent to use the private expedited sync core
 *                          command before using it; otherwise this
 *                          command returns -EPERM.
 * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
 *                          Register the process's intent to use
 *                          MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE.
 *                          If this command is not implemented by an
 *                          architecture, -EINVAL is returned.
 *                          Returns 0 on success.
 * @MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
 *                          Ensure that, upon return from the system call,
 *                          any rseq critical section currently running in
 *                          the calling thread's running thread siblings is
 *                          restarted. If the @flags parameter is 0, this
 *                          applies to all running thread siblings; if
 *                          @flags is MEMBARRIER_CMD_FLAG_CPU, the
 *                          operation is performed only on the CPU
 *                          indicated by @cpu_id. If this command is not
 *                          implemented by an architecture, -EINVAL is
 *                          returned. A process needs to register its
 *                          intent to use the private expedited rseq
 *                          command before using it; otherwise this
 *                          command returns -EPERM.
 * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:
 *                          Register the process's intent to use
 *                          MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ.
 *                          If this command is not implemented by an
 *                          architecture, -EINVAL is returned.
 *                          Returns 0 on success.
 * @MEMBARRIER_CMD_SHARED:
 *                          Alias of MEMBARRIER_CMD_GLOBAL. Provided for
 *                          header backward compatibility.
 * @MEMBARRIER_CMD_GET_REGISTRATIONS:
 *                          Returns a bitmask of previously issued
 *                          registration commands.
 *
 * Command to be passed to the membarrier system call. The commands need to
 * be a single bit each, except for MEMBARRIER_CMD_QUERY, which is assigned
 * the value 0. Illustrative usage sketches follow the enum definitions
 * below.
 */
enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY					= 0,
	MEMBARRIER_CMD_GLOBAL					= (1 << 0),
	MEMBARRIER_CMD_GLOBAL_EXPEDITED				= (1 << 1),
	MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED		= (1 << 2),
	MEMBARRIER_CMD_PRIVATE_EXPEDITED			= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED		= (1 << 4),
	MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE		= (1 << 5),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE	= (1 << 6),
	MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ			= (1 << 7),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ		= (1 << 8),
	MEMBARRIER_CMD_GET_REGISTRATIONS			= (1 << 9),

	/* Alias for header backward compatibility. */
	MEMBARRIER_CMD_SHARED			= MEMBARRIER_CMD_GLOBAL,
};
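
/*
 * Illustrative sketch, not part of the UAPI above: a minimal user-space
 * query/register/use sequence for the private expedited command, assuming
 * the program is built against <unistd.h> and <sys/syscall.h> and that the
 * kernel headers provide __NR_membarrier. glibc exposes no wrapper for
 * membarrier(2), so the raw syscall is used; error handling is reduced to
 * the essentials.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static int membarrier(int cmd, unsigned int flags, int cpu_id)
 *	{
 *		return syscall(__NR_membarrier, cmd, flags, cpu_id);
 *	}
 *
 *	static int setup_private_expedited(void)
 *	{
 *		int supported;
 *
 *		supported = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);
 *		if (supported < 0 ||
 *		    !(supported & MEMBARRIER_CMD_PRIVATE_EXPEDITED))
 *			return -1;
 *		return membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED,
 *				  0, 0);
 *	}
 *
 *	static int barrier_all_thread_siblings(void)
 *	{
 *		return membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
 *	}
 *
 * Calling barrier_all_thread_siblings() without the prior registration
 * fails with -EPERM, as documented above.
 */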

enum membarrier_cmd_flag {
	MEMBARRIER_CMD_FLAG_CPU		= (1 << 0),
};
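
/*
 * Illustrative sketch, not part of the UAPI above: restricting the rseq
 * fence to a single CPU with MEMBARRIER_CMD_FLAG_CPU. It reuses the
 * hypothetical membarrier() wrapper from the previous sketch and assumes
 * the process has already registered with
 * MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ.
 *
 *	static int rseq_fence_cpu(int cpu)
 *	{
 *		return membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
 *				  MEMBARRIER_CMD_FLAG_CPU, cpu);
 *	}
 *
 * With @flags set to 0 instead of MEMBARRIER_CMD_FLAG_CPU, the same command
 * acts on every running thread sibling regardless of CPU, and @cpu_id is
 * not consulted.
 */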

#endif /* _LINUX_MEMBARRIER_H */
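
/*
 * Illustrative sketch, not part of the UAPI above: checking which
 * registrations a process has already performed, which can be useful in a
 * library that cannot assume what the application did earlier. It reuses
 * the hypothetical membarrier() wrapper from the first sketch. As described
 * above, MEMBARRIER_CMD_GET_REGISTRATIONS returns a bitmask of previously
 * issued registration commands.
 *
 *	static int has_private_expedited_registration(void)
 *	{
 *		int regs;
 *
 *		regs = membarrier(MEMBARRIER_CMD_GET_REGISTRATIONS, 0, 0);
 *		if (regs < 0)
 *			return 0;
 *		return !!(regs & MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED);
 *	}
 */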