/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 * The Arm C Language Extensions specifications can be found at the following
 * link: https://github.com/ARM-software/acle/releases
 *
 * The ACLE section numbers are subject to change. When consulting the
 * specifications, it is recommended to search using section titles if
 * the section numbers look outdated.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __ARM_ACLE_H
#define __ARM_ACLE_H

#ifndef __ARM_ACLE
#error "ACLE intrinsics support not enabled."
#endif

#include <stdint.h>

#if defined(__cplusplus)
extern "C" {
#endif

/* 7 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
/* 7.3 Memory barriers */
void __dmb(unsigned int);
void __dsb(unsigned int);
void __isb(unsigned int);
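
/* Example (illustrative, not part of ACLE): each barrier takes the
 * instruction's option encoding, so a full-system barrier is requested
 * with 0xF (SY), e.g.
 *   __dmb(0xF);
 */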

/* 7.4 Hints */
void __wfi(void);
void __wfe(void);
void __sev(void);
void __sevl(void);
void __yield(void);

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __dbg(t) __builtin_arm_dbg(t)
#endif

#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
#define _CHKFEAT_GCS 1
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__chkfeat(uint64_t __features) {
  return __builtin_arm_chkfeat(__features) ^ __features;
}
#endif
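
/* Example (illustrative): __chkfeat() returns the subset of the queried bits
 * whose features are known to be enabled, so
 *   uint64_t __gcs_on = __chkfeat(_CHKFEAT_GCS);
 * yields _CHKFEAT_GCS when Guarded Control Stacks are active and 0 otherwise.
 */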

/* 7.5 Swap */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__swp(uint32_t __x, volatile uint32_t *__p) {
  uint32_t __v;
  do
    __v = __builtin_arm_ldrex(__p);
  while (__builtin_arm_strex(__x, __p));
  return __v;
}
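
/* Example (illustrative, names are placeholders): atomically store a new
 * value and obtain the previous one:
 *   uint32_t __old = __swp(__new_value, &__word);
 */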

/* 7.6 Memory prefetch intrinsics */
/* 7.6.1 Data prefetch */
#define __pld(addr) __pldx(0, 0, 0, addr)

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, 1)
#else
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
#endif

/* 7.6.2 Instruction prefetch */
#define __pli(addr) __plix(0, 0, addr)

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, 0)
#else
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
#endif
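
/* Example (illustrative; argument meanings per ACLE 7.6.1, where access_kind
 * 1 selects a prefetch for write, cache_level 0 selects L1, and
 * retention_policy 0 selects normal retention; __p is a placeholder pointer):
 *   __pld(__p);             prefetch __p for a read
 *   __pldx(1, 0, 0, __p);   prefetch __p for a write
 */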

/* 7.7 NOP */
#if !defined(_MSC_VER) || (!defined(__aarch64__) && !defined(__arm64ec__))
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
  __builtin_arm_nop();
}
#endif

/* 8 DATA-PROCESSING INTRINSICS */
/* 8.2 Miscellaneous data-processing intrinsics */
/* ROR */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__ror(uint32_t __x, uint32_t __y) {
  __y %= 32;
  if (__y == 0)
    return __x;
  return (__x >> __y) | (__x << (32 - __y));
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rorll(uint64_t __x, uint32_t __y) {
  __y %= 64;
  if (__y == 0)
    return __x;
  return (__x >> __y) | (__x << (64 - __y));
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rorl(unsigned long __x, uint32_t __y) {
#if __SIZEOF_LONG__ == 4
  return __ror(__x, __y);
#else
  return __rorll(__x, __y);
#endif
}
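
/* Example (illustrative):
 *   __ror(0x80000001u, 1)  == 0xC0000000u
 *   __rorll(0x1ull, 4)     == 0x1000000000000000ull
 */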

/* CLZ */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clz(uint32_t __t) {
  return __builtin_arm_clz(__t);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_arm_clz(__t);
#else
  return __builtin_arm_clz64(__t);
#endif
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzll(uint64_t __t) {
  return __builtin_arm_clz64(__t);
}
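
/* Example (illustrative):
 *   __clz(0x00000001u) == 31
 *   __clz(0x80000000u) == 0
 *   __clzll(0x1ull)    == 63
 */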

/* CLS */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__cls(uint32_t __t) {
  return __builtin_arm_cls(__t);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_arm_cls(__t);
#else
  return __builtin_arm_cls64(__t);
#endif
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsll(uint64_t __t) {
  return __builtin_arm_cls64(__t);
}

/* REV */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev(uint32_t __t) {
  return __builtin_bswap32(__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__revl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_bswap32(__t);
#else
  return __builtin_bswap64(__t);
#endif
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__revll(uint64_t __t) {
  return __builtin_bswap64(__t);
}
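
/* Example (illustrative):
 *   __rev(0x11223344u)             == 0x44332211u
 *   __revll(0x1122334455667788ull) == 0x8877665544332211ull
 */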

/* REV16 */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev16(uint32_t __t) {
  return __ror(__rev(__t), 16);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rev16ll(uint64_t __t) {
  return (((uint64_t)__rev16(__t >> 32)) << 32) | (uint64_t)__rev16((uint32_t)__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rev16l(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rev16(__t);
#else
  return __rev16ll(__t);
#endif
}

/* REVSH */
static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
__revsh(int16_t __t) {
  return (int16_t)__builtin_bswap16((uint16_t)__t);
}

/* RBIT */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rbit(uint32_t __t) {
  return __builtin_arm_rbit(__t);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rbitll(uint64_t __t) {
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
  return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
         __builtin_arm_rbit(__t >> 32);
#else
  return __builtin_arm_rbit64(__t);
#endif
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rbitl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rbit(__t);
#else
  return __rbitll(__t);
#endif
}
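
/* Example (illustrative):
 *   __rbit(0x00000001u) == 0x80000000u
 *   __rbit(0xF0000000u) == 0x0000000Fu
 */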

/* 8.3 16-bit multiplications */
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulbb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulbt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smultb(int32_t __a, int32_t __b) {
  return __builtin_arm_smultb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smultt(int32_t __a, int32_t __b) {
  return __builtin_arm_smultt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulwb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulwt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwt(__a, __b);
}
#endif
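
/* Example (illustrative): the suffix names the halfword taken from each
 * operand (b = bottom, t = top), so with __a = 0x00010002 and __b = 0x00030004:
 *   __smulbb(__a, __b) == 2 * 4 == 8
 *   __smulbt(__a, __b) == 2 * 3 == 6
 */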

/*
 * 8.4 Saturating intrinsics
 *
 * FIXME: Change the guard to the corresponding __ARM_FEATURE flag when the
 * Q-flag intrinsics are implemented and the flag is enabled.
 */
/* 8.4.1 Width-specified saturation intrinsics */
#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT
#define __ssat(x, y) __builtin_arm_ssat(x, y)
#define __usat(x, y) __builtin_arm_usat(x, y)
#endif
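
/* Example (illustrative): the second argument is the saturation bit-width,
 * so __ssat(x, 8) clamps x to [-128, 127] and __usat(x, 8) clamps x to
 * [0, 255]; e.g. __ssat(300, 8) == 127.
 */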

/* 8.4.2 Saturating addition and subtraction intrinsics */
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__qadd(int32_t __t, int32_t __v) {
  return __builtin_arm_qadd(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__qsub(int32_t __t, int32_t __v) {
  return __builtin_arm_qsub(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__qdbl(int32_t __t) {
  return __builtin_arm_qadd(__t, __t);
}
#endif
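
/* Example (illustrative): results saturate instead of wrapping, e.g.
 *   __qadd(0x7FFFFFFF, 1) == 0x7FFFFFFF   (the Q flag is set on saturation)
 */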

/* 8.4.3 Accumulating multiplications */
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlabb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlabt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlatb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlatt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlawb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlawt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawt(__a, __b, __c);
}
#endif

/* 8.5.4 Parallel 16-bit saturation */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
#define __usat16(x, y) __builtin_arm_usat16(x, y)
#endif

/* 8.5.5 Packing and unpacking */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
typedef int32_t int8x4_t;
typedef int32_t int16x2_t;
typedef uint32_t uint8x4_t;
typedef uint32_t uint16x2_t;

static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_sxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtb16(int8x4_t __a) {
  return __builtin_arm_sxtb16(__a);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_uxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtb16(int8x4_t __a) {
  return __builtin_arm_uxtb16(__a);
}
#endif

/* 8.5.6 Parallel selection */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__sel(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_sel(__a, __b);
}
#endif

/* 8.5.7 Parallel 8-bit addition and subtraction */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__sadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_sadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__ssub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_ssub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__usub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usub8(__a, __b);
}
#endif
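
/* Example (illustrative): each byte lane is processed independently, e.g.
 *   __uadd8(0x01020304u, 0x01010101u)  == 0x02030405u
 *   __uqadd8(0xFF000000u, 0x02000000u) == 0xFF000000u   (per-lane saturation)
 */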

/* 8.5.8 Sum of 8-bit absolute differences */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usad8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usad8(__a, __b);
}
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
  return __builtin_arm_usada8(__a, __b, __c);
}
#endif

/* 8.5.9 Parallel 16-bit addition and subtraction */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usub16(__a, __b);
}
#endif

/* 8.5.10 Parallel 16-bit multiplication */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlad(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smladx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlald(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlaldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsd(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsdx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsld(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuad(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuad(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuadx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuadx(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusd(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusd(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusdx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusdx(__a, __b);
}
#endif
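
/* Example (illustrative): __smuad() multiplies corresponding halfwords and
 * adds the products, __smusd() subtracts the top product from the bottom one,
 * so with __a = 0x00020003 and __b = 0x00040005:
 *   __smuad(__a, __b) == 3*5 + 2*4 == 23
 *   __smusd(__a, __b) == 3*5 - 2*4 == 7
 */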

/* 8.6 Floating-point data-processing intrinsics */
#if (defined(__ARM_FEATURE_DIRECTED_ROUNDING) &&                              \
     __ARM_FEATURE_DIRECTED_ROUNDING) &&                                      \
    (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE)
static __inline__ double __attribute__((__always_inline__, __nodebug__))
__rintn(double __a) {
  return __builtin_roundeven(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__))
__rintnf(float __a) {
  return __builtin_roundevenf(__a);
}
#endif

/* 8.8 CRC32 intrinsics */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32b(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32b(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32h(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32h(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32w(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32w(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32d(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32d(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cb(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32cb(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32ch(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32ch(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cw(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32cw(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cd(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32cd(__a, __b);
}
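
/* Example (illustrative, __buf and __len are placeholders): accumulate a
 * CRC-32 over a byte buffer using the conventional all-ones initial value
 * and final inversion:
 *   uint32_t __crc = 0xFFFFFFFFu;
 *   for (size_t __i = 0; __i != __len; ++__i)
 *     __crc = __crc32b(__crc, __buf[__i]);
 *   __crc = ~__crc;
 */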

/* 8.6 Floating-point data-processing intrinsics */
/* Armv8.3-A Javascript conversion intrinsic */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("v8.3a")))
__jcvt(double __a) {
  return __builtin_arm_jcvt(__a);
}
#endif

/* Armv8.5-A FP rounding intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32zf(float __a) {
  return __builtin_arm_rint32zf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32z(double __a) {
  return __builtin_arm_rint32z(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64zf(float __a) {
  return __builtin_arm_rint64zf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64z(double __a) {
  return __builtin_arm_rint64z(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32xf(float __a) {
  return __builtin_arm_rint32xf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32x(double __a) {
  return __builtin_arm_rint32x(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64xf(float __a) {
  return __builtin_arm_rint64xf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64x(double __a) {
  return __builtin_arm_rint64x(__a);
}
#endif

/* 8.9 Armv8.7-A load/store 64-byte intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
typedef struct {
    uint64_t val[8];
} data512_t;

static __inline__ data512_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_ld64b(const void *__addr) {
  data512_t __value;
  __builtin_arm_ld64b(__addr, __value.val);
  return __value;
}
static __inline__ void __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64b(void *__addr, data512_t __value) {
  __builtin_arm_st64b(__addr, __value.val);
}
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64bv(void *__addr, data512_t __value) {
  return __builtin_arm_st64bv(__addr, __value.val);
}
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64bv0(void *__addr, data512_t __value) {
  return __builtin_arm_st64bv0(__addr, __value.val);
}
#endif
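
/* Example (illustrative, assumes FEAT_LS64 and memory for which 64-byte
 * accesses are permitted; __src and __dst are placeholders): copy one
 * 64-byte block:
 *   data512_t __blk = __arm_ld64b(__src);
 *   __arm_st64b(__dst, __blk);
 */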

/* 11.1 Special register intrinsics */
#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
#define __arm_rsr128(sysreg) __builtin_arm_rsr128(sysreg)
#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg))
#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg))
#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
#define __arm_wsr128(sysreg, v) __builtin_arm_wsr128(sysreg, v)
#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
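
/* Example (illustrative): the argument is a system register name string,
 * e.g. on AArch64:
 *   uint64_t __ticks = __arm_rsr64("cntvct_el0");
 *   __arm_wsr64("tpidr_el0", __value);
 */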

/* 10.3 MTE intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
#define __arm_mte_create_random_tag(__ptr, __mask)  __builtin_arm_irg(__ptr, __mask)
#define __arm_mte_increment_tag(__ptr, __tag_offset)  __builtin_arm_addg(__ptr, __tag_offset)
#define __arm_mte_exclude_tag(__ptr, __excluded)  __builtin_arm_gmi(__ptr, __excluded)
#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)
#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
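
/* Example (illustrative): pick a random allocation tag for a 16-byte granule
 * and record it in tag memory:
 *   void *__p = __arm_mte_create_random_tag(__ptr, 0);
 *   __arm_mte_set_tag(__p);
 */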

/* 18 memcpy family of operations intrinsics - MOPS */
#define __arm_mops_memset_tag(__tagged_address, __value, __size)    \
  __builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
#endif

/* 11.3 Coprocessor Intrinsics */
#if defined(__ARM_FEATURE_COPROC)

#if (__ARM_FEATURE_COPROC & 0x1)

#if (__ARM_ARCH < 8)
#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)                           \
  __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)
#endif /* __ARM_ARCH < 8 */

#define __arm_ldc(coproc, CRd, p) __builtin_arm_ldc(coproc, CRd, p)
#define __arm_stc(coproc, CRd, p) __builtin_arm_stc(coproc, CRd, p)

#define __arm_mcr(coproc, opc1, value, CRn, CRm, opc2)                         \
  __builtin_arm_mcr(coproc, opc1, value, CRn, CRm, opc2)
#define __arm_mrc(coproc, opc1, CRn, CRm, opc2)                                \
  __builtin_arm_mrc(coproc, opc1, CRn, CRm, opc2)

#if (__ARM_ARCH != 4) && (__ARM_ARCH < 8)
#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p)
#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p)
#endif /* (__ARM_ARCH != 4) && (__ARM_ARCH < 8) */

#if (__ARM_ARCH_8M_MAIN__) || (__ARM_ARCH_8_1M_MAIN__)
#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)                           \
  __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)
#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p)
#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p)
#endif /* __ARM_ARCH_8M_MAIN__ || __ARM_ARCH_8_1M_MAIN__ */

#endif /* __ARM_FEATURE_COPROC & 0x1 */

#if (__ARM_FEATURE_COPROC & 0x2)
#define __arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2)                          \
  __builtin_arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2)
#define __arm_ldc2(coproc, CRd, p) __builtin_arm_ldc2(coproc, CRd, p)
#define __arm_stc2(coproc, CRd, p) __builtin_arm_stc2(coproc, CRd, p)
#define __arm_ldc2l(coproc, CRd, p) __builtin_arm_ldc2l(coproc, CRd, p)
#define __arm_stc2l(coproc, CRd, p) __builtin_arm_stc2l(coproc, CRd, p)
#define __arm_mcr2(coproc, opc1, value, CRn, CRm, opc2)                        \
  __builtin_arm_mcr2(coproc, opc1, value, CRn, CRm, opc2)
#define __arm_mrc2(coproc, opc1, CRn, CRm, opc2)                               \
  __builtin_arm_mrc2(coproc, opc1, CRn, CRm, opc2)
#endif

#if (__ARM_FEATURE_COPROC & 0x4)
#define __arm_mcrr(coproc, opc1, value, CRm)                                   \
  __builtin_arm_mcrr(coproc, opc1, value, CRm)
#define __arm_mrrc(coproc, opc1, CRm) __builtin_arm_mrrc(coproc, opc1, CRm)
#endif

#if (__ARM_FEATURE_COPROC & 0x8)
#define __arm_mcrr2(coproc, opc1, value, CRm)                                  \
  __builtin_arm_mcrr2(coproc, opc1, value, CRm)
#define __arm_mrrc2(coproc, opc1, CRm) __builtin_arm_mrrc2(coproc, opc1, CRm)
#endif

#endif /* __ARM_FEATURE_COPROC */

/* 17 Transactional Memory Extension (TME) Intrinsics */
#if defined(__ARM_FEATURE_TME) && __ARM_FEATURE_TME

#define _TMFAILURE_REASON  0x00007fffu
#define _TMFAILURE_RTRY    0x00008000u
#define _TMFAILURE_CNCL    0x00010000u
#define _TMFAILURE_MEM     0x00020000u
#define _TMFAILURE_IMP     0x00040000u
#define _TMFAILURE_ERR     0x00080000u
#define _TMFAILURE_SIZE    0x00100000u
#define _TMFAILURE_NEST    0x00200000u
#define _TMFAILURE_DBG     0x00400000u
#define _TMFAILURE_INT     0x00800000u
#define _TMFAILURE_TRIVIAL 0x01000000u

#define __tstart()        __builtin_arm_tstart()
#define __tcommit()       __builtin_arm_tcommit()
#define __tcancel(__arg)  __builtin_arm_tcancel(__arg)
#define __ttest()         __builtin_arm_ttest()

#endif /* __ARM_FEATURE_TME */
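
/* Example (illustrative): __tstart() returns 0 when a transaction starts and
 * a _TMFAILURE_* status otherwise:
 *   uint64_t __status = __tstart();
 *   if (__status == 0) {
 *     ... transactional accesses ...
 *     __tcommit();
 *   } else if (__status & _TMFAILURE_RTRY) {
 *     ... the transaction may be retried ...
 *   }
 */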

/* 8.7 Armv8.5-A Random number generation intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand")))
__rndr(uint64_t *__p) {
  return __builtin_arm_rndr(__p);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand")))
__rndrrs(uint64_t *__p) {
  return __builtin_arm_rndrrs(__p);
}
#endif
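
/* Example (illustrative): both intrinsics return 0 on success and nonzero on
 * failure, writing the random value through the pointer:
 *   uint64_t __r;
 *   if (__rndr(&__r) == 0) {
 *     ... use __r ...
 *   }
 */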

/* 11.2 Guarded Control Stack intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ void * __attribute__((__always_inline__, __nodebug__))
__gcspr() {
  return (void *)__builtin_arm_rsr64("gcspr_el0");
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("gcs")))
__gcspopm() {
  return __builtin_arm_gcspopm(0);
}

static __inline__ void *__attribute__((__always_inline__, __nodebug__,
                                       target("gcs")))
__gcsss(void *__stack) {
  return __builtin_arm_gcsss(__stack);
}
#endif

#if defined(__cplusplus)
}
#endif

#endif /* __ARM_ACLE_H */