/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _SYS_ATOMIC_COMMON_H_
#define	_SYS_ATOMIC_COMMON_H_

#ifndef _MACHINE_ATOMIC_H_
#error do not include this header, use machine/atomic.h
#endif

#include <sys/types.h>

#define	__atomic_load_bool_relaxed(p)	(*(volatile _Bool *)(p))
#define	__atomic_store_bool_relaxed(p, v)	\
    (*(volatile _Bool *)(p) = (_Bool)(v))

#define	__atomic_load_char_relaxed(p)	(*(volatile u_char *)(p))
#define	__atomic_load_short_relaxed(p)	(*(volatile u_short *)(p))
#define	__atomic_load_int_relaxed(p)	(*(volatile u_int *)(p))
#define	__atomic_load_long_relaxed(p)	(*(volatile u_long *)(p))
#define	__atomic_load_8_relaxed(p)	(*(volatile uint8_t *)(p))
#define	__atomic_load_16_relaxed(p)	(*(volatile uint16_t *)(p))
#define	__atomic_load_32_relaxed(p)	(*(volatile uint32_t *)(p))
#define	__atomic_load_64_relaxed(p)	(*(volatile uint64_t *)(p))

#define	__atomic_store_char_relaxed(p, v)	\
    (*(volatile u_char *)(p) = (u_char)(v))
#define	__atomic_store_short_relaxed(p, v)	\
    (*(volatile u_short *)(p) = (u_short)(v))
#define	__atomic_store_int_relaxed(p, v)	\
    (*(volatile u_int *)(p) = (u_int)(v))
#define	__atomic_store_long_relaxed(p, v)	\
    (*(volatile u_long *)(p) = (u_long)(v))
#define	__atomic_store_8_relaxed(p, v)		\
    (*(volatile uint8_t *)(p) = (uint8_t)(v))
#define	__atomic_store_16_relaxed(p, v)		\
    (*(volatile uint16_t *)(p) = (uint16_t)(v))
#define	__atomic_store_32_relaxed(p, v)		\
    (*(volatile uint32_t *)(p) = (uint32_t)(v))
#define	__atomic_store_64_relaxed(p, v)		\
    (*(volatile uint64_t *)(p) = (uint64_t)(v))

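/*
 * Note (illustrative, not part of the interface): the "relaxed"
 * semantics above come from the volatile cast alone.  The compiler
 * may not duplicate or elide a volatile access, and a naturally
 * aligned access of these sizes is a single instruction on the
 * supported architectures, so the value cannot be torn.  No inter-CPU
 * ordering is implied.  These double-underscore macros are internal;
 * consumers normally go through the checked wrappers defined below,
 * e.g. (hypothetical counter):
 *
 *	static u_int counter;
 *
 *	u_int snap = __atomic_load_int_relaxed(&counter);
 *	__atomic_store_int_relaxed(&counter, snap + 1);
 */
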
/*
 * When _Generic is available, try to provide some type checking.
 */
#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
    __has_extension(c_generic_selections)
#define	atomic_load_bool(p)			\
	_Generic(*(p), _Bool: __atomic_load_bool_relaxed(p))
#define	atomic_store_bool(p, v)			\
	_Generic(*(p), _Bool: __atomic_store_bool_relaxed(p, v))

#define	__atomic_load_generic(p, t, ut, n)	\
	_Generic(*(p),				\
	    t: __atomic_load_ ## n ## _relaxed(p), \
	    ut: __atomic_load_ ## n ## _relaxed(p))
#define	__atomic_store_generic(p, v, t, ut, n)	\
	_Generic(*(p),				\
	    t: __atomic_store_ ## n ## _relaxed(p, v), \
	    ut: __atomic_store_ ## n ## _relaxed(p, v))
#else
#define	atomic_load_bool(p)			\
	__atomic_load_bool_relaxed(p)
#define	atomic_store_bool(p, v)			\
	__atomic_store_bool_relaxed(p, v)
#define	__atomic_load_generic(p, t, ut, n)	\
	__atomic_load_ ## n ## _relaxed(p)
#define	__atomic_store_generic(p, v, t, ut, n)	\
	__atomic_store_ ## n ## _relaxed(p, v)
#endif

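/*
 * What the _Generic selection buys (illustrative): the controlling
 * expression *(p) must match one of the listed types exactly, so a
 * mistyped pointer is rejected at compile time instead of being
 * silently accessed through the wrong width:
 *
 *	int x;
 *	long y;
 *
 *	(void)atomic_load_int(&x);	(ok: matches the int branch)
 *	(void)atomic_load_int(&y);	(compile-time error: no long branch)
 *
 * With the unchecked fallback definitions above, the second line would
 * compile silently and access "y" as a u_int.
 */
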
#define	atomic_load_char(p)	__atomic_load_generic(p, char, u_char, char)
#define	atomic_load_short(p)	__atomic_load_generic(p, short, u_short, short)
#define	atomic_load_int(p)	__atomic_load_generic(p, int, u_int, int)
#define	atomic_load_long(p)	__atomic_load_generic(p, long, u_long, long)
#define	atomic_load_8(p)	__atomic_load_generic(p, int8_t, uint8_t, 8)
#define	atomic_load_16(p)	__atomic_load_generic(p, int16_t, uint16_t, 16)
#define	atomic_load_32(p)	__atomic_load_generic(p, int32_t, uint32_t, 32)
#ifdef __LP64__
#define	atomic_load_64(p)	__atomic_load_generic(p, int64_t, uint64_t, 64)
#endif
#define	atomic_store_char(p, v)			\
	__atomic_store_generic(p, v, char, u_char, char)
#define	atomic_store_short(p, v)		\
	__atomic_store_generic(p, v, short, u_short, short)
#define	atomic_store_int(p, v)			\
	__atomic_store_generic(p, v, int, u_int, int)
#define	atomic_store_long(p, v)			\
	__atomic_store_generic(p, v, long, u_long, long)
#define	atomic_store_8(p, v)			\
	__atomic_store_generic(p, v, int8_t, uint8_t, 8)
#define	atomic_store_16(p, v)			\
	__atomic_store_generic(p, v, int16_t, uint16_t, 16)
#define	atomic_store_32(p, v)			\
	__atomic_store_generic(p, v, int32_t, uint32_t, 32)
#ifdef __LP64__
#define	atomic_store_64(p, v)			\
	__atomic_store_generic(p, v, int64_t, uint64_t, 64)
#endif

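/*
 * Typical use of the checked accessors (illustrative; "softc" and its
 * "flags" member are hypothetical):
 *
 *	struct softc { u_int flags; };
 *	struct softc *sc = ...;
 *
 *	u_int f = atomic_load_int(&sc->flags);
 *	atomic_store_int(&sc->flags, f | 1);
 *
 * atomic_load_64()/atomic_store_64() are only defined here for
 * __LP64__ targets; a 32-bit machine/atomic.h must supply them itself
 * if the architecture can perform 64-bit atomic accesses.
 */
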
#define	atomic_load_ptr(p)	(*(volatile __typeof(*p) *)(p))
#define	atomic_store_ptr(p, v)	(*(volatile __typeof(*p) *)(p) = (v))

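/*
 * The pointer accessors use __typeof rather than _Generic, so any
 * pointer type is accepted and the load yields a value of the stored
 * pointer's own type.  Sketch ("head" is a hypothetical global):
 *
 *	struct foo *head;
 *
 *	struct foo *f = atomic_load_ptr(&head);
 *	atomic_store_ptr(&head, NULL);
 */
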
/*
 * Currently all architectures provide acquire and release fences of
 * their own, but none provides a consume fence.  The kludge below lets
 * the relevant code stop openly resorting to the stronger acquire
 * fence until proper consume support is sorted out.
 */
#define	atomic_load_consume_ptr(p)	\
    ((__typeof(*p)) atomic_load_acq_ptr((uintptr_t *)p))
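
/*
 * Consume ordering is what dependency-carrying code such as lock-free
 * list traversal needs: loads that depend on the returned pointer are
 * ordered after the load of the pointer itself.  Sketch ("head" and
 * "value" are hypothetical):
 *
 *	struct foo *f = atomic_load_consume_ptr(&head);
 *
 *	if (f != NULL)
 *		v = f->value;	(ordered after the load of head)
 *
 * As noted above, today this simply expands to the stronger acquire
 * load.
 */
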
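/*
 * A fence that orders against interrupt handlers on the same CPU only
 * needs to constrain the compiler, since a CPU observes its own memory
 * accesses in program order; no hardware barrier is required.
 */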
#define	atomic_interrupt_fence()	__compiler_membar()

#endif /* !_SYS_ATOMIC_COMMON_H_ */