//! __emutls_get_address specific builtin
//!
//! derived work from LLVM Compiler Infrastructure - release 8.0 (MIT)
//! https://github.com/llvm-mirror/compiler-rt/blob/release_80/lib/builtins/emutls.c

const std = @import("std");
const builtin = @import("builtin");
const common = @import("common.zig");

const abort = std.posix.abort;
const assert = std.debug.assert;
const expect = std.testing.expect;

/// defined in C as:
/// typedef unsigned int gcc_word __attribute__((mode(word)));
const gcc_word = usize;

pub const panic = common.panic;

comptime {
    if (builtin.link_libc and (builtin.abi.isAndroid() or builtin.abi.isOpenHarmony() or builtin.os.tag == .openbsd)) {
        @export(&__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = common.linkage, .visibility = common.visibility });
    }
}

/// public entrypoint for generated code using EmulatedTLS
pub fn __emutls_get_address(control: *emutls_control) callconv(.c) *anyopaque {
    return control.getPointer();
}

/// Simple allocator interface, to avoid pulling in the whole
/// std allocator implementation.
const simple_allocator = struct {
    /// Allocate a memory chunk for the requested type. Return a pointer to the data.
    pub fn alloc(comptime T: type) *T {
        return @ptrCast(@alignCast(advancedAlloc(@alignOf(T), @sizeOf(T))));
    }

    /// Allocate a slice of T, with len elements.
    pub fn allocSlice(comptime T: type, len: usize) []T {
        return @as([*]T, @ptrCast(@alignCast(
            advancedAlloc(@alignOf(T), @sizeOf(T) * len),
        )))[0..len];
    }

    /// Allocate a memory chunk.
    pub fn advancedAlloc(alignment: u29, size: usize) [*]u8 {
        const minimal_alignment = @max(@alignOf(usize), alignment);

        var aligned_ptr: ?*anyopaque = undefined;
        if (std.c.posix_memalign(&aligned_ptr, minimal_alignment, size) != 0) {
            abort();
        }

        return @ptrCast(aligned_ptr);
    }

    /// Resize a slice.
    pub fn reallocSlice(comptime T: type, slice: []T, len: usize) []T {
        const c_ptr: *anyopaque = @ptrCast(slice.ptr);
        const new_array: [*]T = @ptrCast(@alignCast(std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort()));
        return new_array[0..len];
    }

    /// Free a memory chunk allocated with simple_allocator.
    pub fn free(ptr: anytype) void {
        std.c.free(@ptrCast(ptr));
    }
};

/// Simple array of ?ObjectPointer with automatic resizing and
/// automatic storage allocation.
const ObjectArray = struct {
    const ObjectPointer = *anyopaque;

    // content of the array
    slots: []?ObjectPointer,

    /// create a new ObjectArray with n slots. must call deinit() to deallocate.
    pub fn init(n: usize) *ObjectArray {
        const array = simple_allocator.alloc(ObjectArray);

        array.* = ObjectArray{
            .slots = simple_allocator.allocSlice(?ObjectPointer, n),
        };

        for (array.slots) |*object| {
            object.* = null;
        }

        return array;
    }

    /// deallocate the ObjectArray.
    pub fn deinit(self: *ObjectArray) void {
        // deallocate used objects in the array
        for (self.slots) |*object| {
            simple_allocator.free(object.*);
        }
        simple_allocator.free(self.slots.ptr);
        simple_allocator.free(self);
    }

    /// resize the ObjectArray if needed.
    pub fn ensureLength(self: *ObjectArray, new_len: usize) *ObjectArray {
        const old_len = self.slots.len;

        if (old_len > new_len) {
            return self;
        }

        // reallocate
        self.slots = simple_allocator.reallocSlice(?ObjectPointer, self.slots, new_len);

        // init newly added slots
        for (self.slots[old_len..]) |*object| {
            object.* = null;
        }

        return self;
    }

    /// Retrieve the pointer at the requested index, using control to initialize it if needed.
    pub fn getPointer(self: *ObjectArray, index: usize, control: *emutls_control) ObjectPointer {
        if (self.slots[index] == null) {
            // initialize the slot
            const size = control.size;
            const alignment: u29 = @truncate(control.alignment);

            const data = simple_allocator.advancedAlloc(alignment, size);
            errdefer simple_allocator.free(data);

            if (control.default_value) |value| {
                // default value: copy the content to the newly allocated object.
                @memcpy(data[0..size], @as([*]const u8, @ptrCast(value)));
            } else {
                // no default: return zeroed memory.
                @memset(data[0..size], 0);
            }

            self.slots[index] = data;
        }

        return self.slots[index].?;
    }
};

// Global structure for Thread Storage.
// It provides thread-safety for on-demand storage of Thread Objects.
const current_thread_storage = struct {
    var key: std.c.pthread_key_t = undefined;
    var init_once = std.once(current_thread_storage.init);

    /// Return a per thread ObjectArray with at least the expected index.
    pub fn getArray(index: usize) *ObjectArray {
        if (current_thread_storage.getspecific()) |array| {
            // we already have a specific. just ensure the array is
            // big enough for the wanted index.
            return array.ensureLength(index);
        }

        // no specific. we need to create a new array.

        // make it contain at least 16 objects (to avoid too many
        // reallocations at startup).
        const size = @max(16, index);

        // create a new array and store it.
        const array: *ObjectArray = ObjectArray.init(size);
        current_thread_storage.setspecific(array);
        return array;
    }

    /// Return the thread-specific value, cast to *ObjectArray.
    fn getspecific() ?*ObjectArray {
        return @ptrCast(@alignCast(std.c.pthread_getspecific(current_thread_storage.key)));
    }

    /// Set the thread-specific value.
    fn setspecific(new: ?*ObjectArray) void {
        if (std.c.pthread_setspecific(current_thread_storage.key, @ptrCast(new)) != 0) {
            abort();
        }
    }

    /// Initialize pthread_key_t.
    fn init() void {
        if (std.c.pthread_key_create(&current_thread_storage.key, current_thread_storage.deinit) != .SUCCESS) {
            abort();
        }
    }

    /// Invoked by the pthread specific destructor. The passed argument is the ObjectArray pointer.
    fn deinit(arrayPtr: *anyopaque) callconv(.c) void {
        const array: *ObjectArray = @ptrCast(@alignCast(arrayPtr));
        array.deinit();
    }
};

const emutls_control = extern struct {
    // An emutls_control value is a global shared across all
    // threads: they all use the same index for a given TLS variable.
    // The data array (containing the addresses of the allocated
    // variables) is thread-specific and stored using
    // pthread_setspecific().

    // size of the object in bytes
    size: gcc_word,

    // alignment of the object in bytes
    alignment: gcc_word,

    object: extern union {
        // data[index-1] is the object address / 0 = uninit
        index: usize,

        // object address, when in single thread env (not used)
        address: *anyopaque,
    },

    // null or non-zero initial value for the object
    default_value: ?*const anyopaque,
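
    // For reference, the corresponding C declaration in LLVM's emutls.c
    // looks approximately like this (layout sketch only):
    //
    //     typedef struct __emutls_control {
    //         gcc_word size;
    //         gcc_word align;
    //         union { uintptr_t index; void *address; } object;
    //         void *value;
    //     } __emutls_control;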

    // global Mutex used to serialize control.index initialization.
    var mutex: std.c.pthread_mutex_t = std.c.PTHREAD_MUTEX_INITIALIZER;

    // global counter for keeping track of requested indexes.
    // access should be done with mutex held.
    var next_index: usize = 1;

    /// Simple wrapper for global lock.
    fn lock() void {
        if (std.c.pthread_mutex_lock(&emutls_control.mutex) != .SUCCESS) {
            abort();
        }
    }

    /// Simple wrapper for global unlock.
    fn unlock() void {
        if (std.c.pthread_mutex_unlock(&emutls_control.mutex) != .SUCCESS) {
            abort();
        }
    }

    /// Helper to retrieve and initialize the global unique index per emutls variable.
    pub fn getIndex(self: *emutls_control) usize {
        // Two threads could race against the same emutls_control.

        // Use an atomic load for reading a coherent value without the lock.
        const index_lockless = @atomicLoad(usize, &self.object.index, .acquire);

        if (index_lockless != 0) {
            // index is already initialized, return it.
            return index_lockless;
        }

        // index is uninitialized: take the global lock to avoid a possible race.
        emutls_control.lock();
        defer emutls_control.unlock();

        const index_locked = self.object.index;
        if (index_locked != 0) {
            // we lost the race, but the index is already initialized: nothing particular to do.
            return index_locked;
        }

        // Store the new index atomically (so the lockless read above stays coherent).
        @atomicStore(usize, &self.object.index, emutls_control.next_index, .release);

        // Increment the next available index
        emutls_control.next_index += 1;

        return self.object.index;
    }

    /// Simple helper for testing purposes.
    pub fn init(comptime T: type, default_value: ?*const T) emutls_control {
        return emutls_control{
            .size = @sizeOf(T),
            .alignment = @alignOf(T),
            .object = .{ .index = 0 },
            .default_value = @ptrCast(default_value),
        };
    }

    /// Get the pointer to the allocated storage for the emutls variable.
    pub fn getPointer(self: *emutls_control) *anyopaque {
        // ensure current_thread_storage initialization is done
        current_thread_storage.init_once.call();

        const index = self.getIndex();
        const array = current_thread_storage.getArray(index);

        return array.getPointer(index - 1, self);
    }

    /// Testing helper for retrieving a typed pointer.
    pub fn get_typed_pointer(self: *emutls_control, comptime T: type) *T {
        assert(self.size == @sizeOf(T));
        assert(self.alignment == @alignOf(T));
        return @ptrCast(@alignCast(self.getPointer()));
    }
};
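
// The lookup path for one access, in short:
//
//     __emutls_get_address(control)
//         -> control.getIndex()                    global index, lock-protected
//         -> current_thread_storage.getArray(idx)  pthread_getspecific lookup
//         -> array.getPointer(idx - 1, control)    lazy per-thread allocation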

test "simple_allocator" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    const data1: *[64]u8 = simple_allocator.alloc([64]u8);
    defer simple_allocator.free(data1);
    for (data1) |*c| {
        c.* = 0xff;
    }

    const data2: [*]u8 = simple_allocator.advancedAlloc(@alignOf(u8), 64);
    defer simple_allocator.free(data2);
    for (data2[0..64]) |*c| {
        c.* = 0xff;
    }
}
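
// A sketch of a test for reallocSlice, which the tests above don't cover:
// grow a slice and check that realloc preserved the old prefix. Same
// libc/OpenBSD gating as the other tests in this file.
test "simple_allocator reallocSlice" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    var slice = simple_allocator.allocSlice(u32, 4);
    for (slice, 0..) |*v, i| {
        v.* = @intCast(i);
    }

    slice = simple_allocator.reallocSlice(u32, slice, 8);
    defer simple_allocator.free(slice.ptr);

    try expect(slice.len == 8);
    for (slice[0..4], 0..) |v, i| {
        try expect(v == i);
    }
}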

test "__emutls_get_address zeroed" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    var ctl = emutls_control.init(usize, null);
    try expect(ctl.object.index == 0);

    // retrieve a variable from ctl
    const x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(ctl.object.index != 0); // index has been allocated for this ctl
    try expect(x.* == 0); // storage has been zeroed

    // modify the storage
    x.* = 1234;

    // retrieve a variable from ctl (same ctl)
    const y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));

    try expect(y.* == 1234); // same content as x.*
    try expect(x == y); // same pointer
}
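
// A sketch of a test for ObjectArray: ensureLength must grow the slot array
// and leave every newly added slot null (i.e. uninitialized).
test "ObjectArray ensureLength" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    const array = ObjectArray.init(4);
    defer array.deinit();

    _ = array.ensureLength(8);
    try expect(array.slots.len == 8);
    for (array.slots) |object| {
        try expect(object == null);
    }
}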

test "__emutls_get_address with default_value" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    const value: usize = 5678; // default value
    var ctl = emutls_control.init(usize, &value);
    try expect(ctl.object.index == 0);

    const x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(ctl.object.index != 0);
    try expect(x.* == 5678); // storage initialized with default value

    // modify the storage
    x.* = 9012;

    try expect(value == 5678); // the default value didn't change

    const y: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    try expect(y.* == 9012); // the modified storage persists
}
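
// A sketch demonstrating the per-thread semantics with std.Thread (available
// here since these tests link libc): a second thread must observe its own,
// freshly zeroed storage for the same control, and its writes must not leak
// back into this thread's copy.
test "__emutls_get_address is per-thread" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    var ctl = emutls_control.init(usize, null);

    const x: *usize = @ptrCast(@alignCast(__emutls_get_address(&ctl)));
    x.* = 1234;

    const worker = struct {
        fn run(c: *emutls_control, seen: *usize) void {
            const y: *usize = @ptrCast(@alignCast(__emutls_get_address(c)));
            seen.* = y.*; // expected: freshly zeroed storage
            y.* = 5678;
        }
    };

    var seen: usize = 1234;
    const t = try std.Thread.spawn(.{}, worker.run, .{ &ctl, &seen });
    t.join();

    try expect(seen == 0); // the other thread got its own zeroed object
    try expect(x.* == 1234); // this thread's value is untouched
}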

test "test default_value with different sizes" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    const testType = struct {
        fn _testType(comptime T: type, value: T) !void {
            var ctl = emutls_control.init(T, &value);
            const x = ctl.get_typed_pointer(T);
            try expect(x.* == value);
        }
    }._testType;

    try testType(usize, 1234);
    try testType(u32, 1234);
    try testType(i16, -12);
    try testType(f64, -12.0);
    try testType(
        @TypeOf("012345678901234567890123456789"),
        "012345678901234567890123456789",
    );
}
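
// A sketch checking index allocation: two distinct controls must end up with
// two distinct, non-zero indexes after their first access.
test "distinct emutls_control values get distinct indexes" {
    if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;

    var ctl_a = emutls_control.init(u32, null);
    var ctl_b = emutls_control.init(u32, null);

    _ = __emutls_get_address(&ctl_a);
    _ = __emutls_get_address(&ctl_b);

    try expect(ctl_a.object.index != 0);
    try expect(ctl_b.object.index != 0);
    try expect(ctl_a.object.index != ctl_b.object.index);
}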