Commit 5aeffab693

Andrew Kelley <andrew@ziglang.org>
2021-12-16 03:12:24
glibc: import sysdep.h from upstream
it's needed for mipsel-linux-gnueabihf
1 parent 5b6d26e
Changed files (11)
lib/libc/glibc/sysdeps/{arc, csky, nios2}
lib/libc/glibc/sysdeps/unix/mips/mips64
lib/libc/glibc/sysdeps/unix/sysv/linux/{arc, csky, mips, nios2, powerpc, s390}
lib/libc/glibc/sysdeps/arc/sysdep.h
@@ -0,0 +1,53 @@
+/* Assembler macros for ARC.
+   Copyright (C) 2020-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdeps/generic/sysdep.h>
+
+#ifdef	__ASSEMBLER__
+
+/* Syntactic details of the assembler.
+   `;' does not end a statement but starts a comment; `#' also starts
+   a comment.  */
+# define ASM_SIZE_DIRECTIVE(name) .size name,.-name
+
+# define ENTRY(name)						\
+	.align 4				ASM_LINE_SEP	\
+	.globl C_SYMBOL_NAME(name)		ASM_LINE_SEP	\
+	.type C_SYMBOL_NAME(name),%function	ASM_LINE_SEP	\
+	C_LABEL(name)				ASM_LINE_SEP	\
+	cfi_startproc				ASM_LINE_SEP	\
+	CALL_MCOUNT
+
+# undef  END
+# define END(name)						\
+	cfi_endproc				ASM_LINE_SEP	\
+	ASM_SIZE_DIRECTIVE(name)
+
+# ifdef SHARED
+#  define PLTJMP(_x)	_x##@plt
+# else
+#  define PLTJMP(_x)	_x
+# endif
+
+# define L(label) .L##label
+
+# define CALL_MCOUNT		/* Do nothing for now.  */
+
+# define STR(reg, rbase, off)	st  reg, [rbase, off * 4]
+# define LDR(reg, rbase, off)	ld  reg, [rbase, off * 4]
+
+#endif	/* __ASSEMBLER__ */
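
For orientation: ENTRY/END bracket an assembly function with CFI and .size bookkeeping, and STR/LDR scale word offsets by 4. A minimal, hypothetical sketch of an ARC assembly stub built on these macros (the symbol name is invented, and C_SYMBOL_NAME/C_LABEL/ASM_LINE_SEP are supplied by headers outside this commit):

    #include <sysdep.h>

    /* Copy word 0 of the array at r0 into word 1, then return.  */
    ENTRY (__frob)
        LDR (r1, r0, 0)
        STR (r1, r0, 1)
        j_s    [blink]
    END (__frob)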
lib/libc/glibc/sysdeps/csky/sysdep.h
@@ -0,0 +1,84 @@
+/* Assembler macros for C-SKY.
+   Copyright (C) 2018-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdeps/generic/sysdep.h>
+#include <features.h>
+
+#ifdef __ASSEMBLER__
+
+# define ASM_SIZE_DIRECTIVE(name) .size name,.-name
+
+/* Define an entry point visible from C.  */
+# define ENTRY(name)		\
+	.globl name;		\
+	.type name,@function;	\
+	.align 4;		\
+	name##:;		\
+	cfi_startproc;		\
+	CALL_MCOUNT
+
+# undef  END
+# define END(name)		\
+	cfi_endproc;		\
+	ASM_SIZE_DIRECTIVE(name)
+
+/* If compiled for profiling, call `mcount' at the start of each function.  */
+# ifdef PROF
+#  ifdef __PIC__
+#   define CALL_MCOUNT				\
+	subi	sp, 4;				\
+	stw	lr, (sp, 0);			\
+	grs	t0, .Lgetpc;			\
+.Lgetpc:					\
+	lrw	gb, .Lgetpc@GOTPC;		\
+	addu	gb, t0;				\
+	lrw	t1, _mcount@PLT;		\
+	ldr.w	t0, (gb, t1 << 0);		\
+	jmp	t0;
+#  else
+#   define CALL_MCOUNT				\
+	subi	sp, 4;				\
+	stw	lr, (sp, 0);			\
+	jbsr	_mcount;
+#  endif
+# else
+#  define CALL_MCOUNT	/* Do nothing.  */
+# endif
+
+# if defined (__CK860__)
+/* Instruction fetch is faster when the label is 16-byte aligned.
+   Fill the padding with nop instructions to avoid an extra jump.  */
+#  define LABLE_ALIGN	\
+	.balignw 16, 0x6c03
+
+#  define PRE_BNEZAD(R)
+
+#  define BNEZAD(R, L)	\
+	bnezad	R, L
+# else
+#  define LABLE_ALIGN	\
+	.balignw 8, 0x6c03
+
+#  define PRE_BNEZAD(R)	\
+	subi	R, 1
+
+#  define BNEZAD(R, L)	\
+	bnez	R, L
+# endif
+
+#endif
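
The PRE_BNEZAD/BNEZAD split lets one loop skeleton serve both CPU variants: CK860 has a fused decrement-and-branch (bnezad), while other cores pre-decrement with subi and branch with bnez. A hypothetical count-down loop over t1 iterations (the body and register choice are illustrative only):

        LABLE_ALIGN
    .Lloop:
        addi    a0, 1           /* stand-in loop body */
        PRE_BNEZAD (t1)         /* subi t1, 1 unless CK860 */
        BNEZAD (t1, .Lloop)     /* bnezad on CK860, bnez otherwise */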
lib/libc/glibc/sysdeps/nios2/sysdep.h
@@ -0,0 +1,65 @@
+/* Assembler macros for Nios II.
+   Copyright (C) 2015-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdeps/generic/sysdep.h>
+
+#ifdef	__ASSEMBLER__
+
+/* Syntactic details of assembler.  */
+
+#define ASM_SIZE_DIRECTIVE(name) .size name,.-name
+
+#define ENTRY(name)						 \
+  .globl C_SYMBOL_NAME(name);					 \
+  .type C_SYMBOL_NAME(name),%function;				 \
+  C_LABEL(name)							 \
+  cfi_startproc;						 \
+  CALL_MCOUNT;
+
+#undef  END
+#define END(name)				\
+  cfi_endproc;					\
+  ASM_SIZE_DIRECTIVE(name)
+
+#ifdef PROF
+
+# ifdef __PIC__
+#  define CALL_MCOUNT				\
+  mov r8, ra;					\
+  nextpc r2;					\
+1:						\
+  movhi r3, %hiadj(_gp_got - 1b);		\
+  addi r3, r3, %lo(_gp_got - 1b);		\
+  add r2, r2, r3;				\
+  ldw r2, %call(_mcount)(r2);			\
+  callr r2;					\
+  mov ra, r8;					\
+  ret;
+# else
+#  define CALL_MCOUNT				\
+  mov r8, ra;					\
+  call _mcount;					\
+  mov ra, r8;					\
+  ret;
+# endif
+
+#else
+# define CALL_MCOUNT		/* Do nothing.  */
+#endif
+
+#endif	/* __ASSEMBLER__ */
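
When PROF is unset, CALL_MCOUNT expands to nothing, so ENTRY/END add no runtime cost; with profiling enabled the _mcount call is spliced in automatically. A hypothetical leaf routine using the macros (Nios II passes the first arguments in r4/r5 and returns in r2):

    #include <sysdep.h>

    /* Return the sum of the first two integer arguments.  */
    ENTRY (__frob_add)
      add	r2, r4, r5
      ret
    END (__frob_add)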
lib/libc/glibc/sysdeps/unix/mips/mips64/sysdep.h
@@ -0,0 +1,65 @@
+/* Copyright (C) 1992-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Alexandre Oliva <aoliva@redhat.com>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdeps/unix/mips/sysdep.h>
+
+#ifdef __ASSEMBLER__
+#include <sys/asm.h>
+
+/* Note that while it is better structurally, going back to call
+   __syscall_error can make things confusing when debugging---it looks
+   like a jump backwards into the previous function.  */
+#ifdef __PIC__
+#define PSEUDO(name, syscall_name, args) \
+  .align 2;								      \
+  .set nomips16;							      \
+  cfi_startproc;							      \
+  99:;									      \
+  .set noat;								      \
+  .cpsetup t9, $1, name;						      \
+  cfi_register (gp, $1);						      \
+  .set at;								      \
+  PTR_LA t9,__syscall_error;						      \
+  .cpreturn;								      \
+  cfi_restore (gp);							      \
+  jr t9;								      \
+  cfi_endproc;								      \
+  ENTRY(name)								      \
+  li v0, SYS_ify(syscall_name);						      \
+  syscall;								      \
+  bne a3, zero, 99b;							      \
+L(syse1):
+#else
+#define PSEUDO(name, syscall_name, args) \
+  .set noreorder;							      \
+  .align 2;								      \
+  .set nomips16;							      \
+  cfi_startproc;							      \
+  99: j __syscall_error;						      \
+  nop;                                                                        \
+  cfi_endproc;								      \
+  ENTRY(name)								      \
+  .set noreorder;							      \
+  li v0, SYS_ify(syscall_name);						      \
+  syscall;								      \
+  .set reorder;								      \
+  bne a3, zero, 99b;							      \
+L(syse1):
+#endif
+
+#endif
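
Concretely, the non-PIC PSEUDO places the error trampoline at local label 99 ahead of the real entry point, which is exactly the backwards jump the comment warns about. A rough sketch of what PSEUDO (__close, close, 1) would produce (names illustrative; ENTRY and the CFI directives, omitted here for brevity, come from other sysdep headers):

        .set noreorder
        .align 2
        .set nomips16
    99: j       __syscall_error         /* error path sits before the entry */
        nop
    ENTRY (__close)
        .set noreorder
        li      v0, __NR_close
        syscall
        .set reorder
        bne     a3, zero, 99b           /* a3 != 0 means error: branch back */
    L(syse1):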
lib/libc/glibc/sysdeps/unix/sysv/linux/arc/sysdep.h
@@ -0,0 +1,229 @@
+/* Assembler macros for ARC.
+   Copyright (C) 2020-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#ifndef _LINUX_ARC_SYSDEP_H
+#define _LINUX_ARC_SYSDEP_H 1
+
+#include <sysdeps/arc/sysdep.h>
+#include <sysdeps/unix/sysv/linux/generic/sysdep.h>
+
+/* "workarounds" for generic code needing to handle 64-bit time_t.  */
+
+/* Fix sysdeps/unix/sysv/linux/clock_getcpuclockid.c.  */
+#define __NR_clock_getres	__NR_clock_getres_time64
+/* Fix sysdeps/nptl/lowlevellock-futex.h.  */
+#define __NR_futex		__NR_futex_time64
+/* Fix sysdeps/unix/sysv/linux/pause.c.  */
+#define __NR_ppoll		__NR_ppoll_time64
+/* Fix sysdeps/unix/sysv/linux/select.c.  */
+#define __NR_pselect6		__NR_pselect6_time64
+/* Fix sysdeps/unix/sysv/linux/recvmmsg.c.  */
+#define __NR_recvmmsg		__NR_recvmmsg_time64
+/* Fix sysdeps/unix/sysv/linux/sigtimedwait.c.  */
+#define __NR_rt_sigtimedwait	__NR_rt_sigtimedwait_time64
+/* Fix sysdeps/unix/sysv/linux/semtimedop.c.  */
+#define __NR_semtimedop		__NR_semtimedop_time64
+/* Hack sysdeps/unix/sysv/linux/generic/utimes.c.  */
+#define __NR_utimensat		__NR_utimensat_time64
+
+/* For RTLD_PRIVATE_ERRNO.  */
+#include <dl-sysdep.h>
+
+#include <tls.h>
+
+#undef SYS_ify
+#define SYS_ify(syscall_name)   __NR_##syscall_name
+
+#ifdef __ASSEMBLER__
+
+/* This is a "normal" system call stub: if there is an error,
+   it returns -1 and sets errno.  */
+
+# undef PSEUDO
+# define PSEUDO(name, syscall_name, args)			\
+  PSEUDO_NOERRNO(name, syscall_name, args)	ASM_LINE_SEP	\
+    brhi   r0, -4096, L (call_syscall_err)	ASM_LINE_SEP
+
+# define ret	j_s  [blink]
+
+# undef PSEUDO_END
+# define PSEUDO_END(name)					\
+  SYSCALL_ERROR_HANDLER				ASM_LINE_SEP	\
+  END (name)
+
+/* --------- Helper for SYSCALL_NOERRNO -----------
+   This kind of system call stub never returns an error.
+   We return the return value register to the caller unexamined.  */
+
+# undef PSEUDO_NOERRNO
+# define PSEUDO_NOERRNO(name, syscall_name, args)		\
+  .text						ASM_LINE_SEP	\
+  ENTRY (name)					ASM_LINE_SEP	\
+    DO_CALL (syscall_name, args)		ASM_LINE_SEP	\
+
+/* Return the return value register unexamined. Since r0 is both
+   syscall return reg and function return reg, no work needed.  */
+# define ret_NOERRNO						\
+  j_s  [blink]		ASM_LINE_SEP
+
+# undef PSEUDO_END_NOERRNO
+# define PSEUDO_END_NOERRNO(name)				\
+  END (name)
+
+/* --------- Helper for SYSCALL_ERRVAL -----------
+   This kind of system call stub returns the errno code as its return
+   value, or zero for success.  We may massage the kernel's return value
+   to meet that ABI, but we never set errno here.  */
+
+# undef PSEUDO_ERRVAL
+# define PSEUDO_ERRVAL(name, syscall_name, args)		\
+  PSEUDO_NOERRNO(name, syscall_name, args)	ASM_LINE_SEP
+
+/* Don't set errno, return kernel error (in errno form) or zero.  */
+# define ret_ERRVAL						\
+  rsub   r0, r0, 0				ASM_LINE_SEP	\
+  ret_NOERRNO
+
+# undef PSEUDO_END_ERRVAL
+# define PSEUDO_END_ERRVAL(name)				\
+  END (name)
+
+
+/* To reduce the code footprint, we confine the actual errno access
+   to a single place, __syscall_error().
+   It takes the raw kernel error value, sets errno, and returns -1.  */
+# if IS_IN (libc)
+#  define CALL_ERRNO_SETTER_C	bl     PLTJMP(HIDDEN_JUMPTARGET(__syscall_error))
+# else
+#  define CALL_ERRNO_SETTER_C	bl     PLTJMP(__syscall_error)
+# endif
+
+# define SYSCALL_ERROR_HANDLER				\
+L (call_syscall_err):			ASM_LINE_SEP	\
+    push_s   blink			ASM_LINE_SEP	\
+    cfi_adjust_cfa_offset (4)		ASM_LINE_SEP	\
+    cfi_rel_offset (blink, 0)		ASM_LINE_SEP	\
+    CALL_ERRNO_SETTER_C			ASM_LINE_SEP	\
+    pop_s  blink			ASM_LINE_SEP	\
+    cfi_adjust_cfa_offset (-4)		ASM_LINE_SEP	\
+    cfi_restore (blink)			ASM_LINE_SEP	\
+    j_s      [blink]
+
+# define DO_CALL(syscall_name, args)			\
+    mov    r8, __NR_##syscall_name	ASM_LINE_SEP	\
+    ARC_TRAP_INSN			ASM_LINE_SEP
+
+# define ARC_TRAP_INSN	trap_s 0
+
+#else  /* !__ASSEMBLER__ */
+
+# define SINGLE_THREAD_BY_GLOBAL		1
+
+# if IS_IN (libc)
+extern long int __syscall_error (long int);
+hidden_proto (__syscall_error)
+# endif
+
+# define ARC_TRAP_INSN	"trap_s 0	\n\t"
+
+# undef INTERNAL_SYSCALL_NCS
+# define INTERNAL_SYSCALL_NCS(number, nr_args, args...)	\
+  ({								\
+    /* Per ABI, r0 is 1st arg and return reg.  */		\
+    register long int __ret __asm__("r0");			\
+    register long int _sys_num __asm__("r8");			\
+								\
+    LOAD_ARGS_##nr_args (number, args)				\
+								\
+    __asm__ volatile (						\
+                      ARC_TRAP_INSN				\
+                      : "+r" (__ret)				\
+                      : "r"(_sys_num) ASM_ARGS_##nr_args	\
+                      : "memory");				\
+                                                                \
+    __ret; })
+
+# undef INTERNAL_SYSCALL
+# define INTERNAL_SYSCALL(name, nr, args...) 	\
+  INTERNAL_SYSCALL_NCS(__NR_##name, nr, args)
+
+/* Macros for setting up inline __asm__ input regs.  */
+# define ASM_ARGS_0
+# define ASM_ARGS_1	ASM_ARGS_0, "r" (__ret)
+# define ASM_ARGS_2	ASM_ARGS_1, "r" (_arg2)
+# define ASM_ARGS_3	ASM_ARGS_2, "r" (_arg3)
+# define ASM_ARGS_4	ASM_ARGS_3, "r" (_arg4)
+# define ASM_ARGS_5	ASM_ARGS_4, "r" (_arg5)
+# define ASM_ARGS_6	ASM_ARGS_5, "r" (_arg6)
+# define ASM_ARGS_7	ASM_ARGS_6, "r" (_arg7)
+
+/* Macros for converting sys-call wrapper args into sys call args.  */
+# define LOAD_ARGS_0(nm, arg)				\
+  _sys_num = (long int) (nm);
+
+# define LOAD_ARGS_1(nm, arg1)				\
+  __ret = (long int) (arg1);					\
+  LOAD_ARGS_0 (nm, arg1)
+
+/* Note that the use of _tmpX might look superfluous, but it is needed
+   to ensure that register variables are not clobbered if an arg happens
+   to be a function call itself, e.g. sched_setaffinity() calling getpid()
+   for arg2.  This specific order of recursive calling is also important:
+   it segregates the evaluation of the tmp args (the function-call case
+   described above) from the assignment of the register variables.  */
+
+# define LOAD_ARGS_2(nm, arg1, arg2)			\
+  long int _tmp2 = (long int) (arg2);			\
+  LOAD_ARGS_1 (nm, arg1)				\
+  register long int _arg2 __asm__ ("r1") = _tmp2;
+
+# define LOAD_ARGS_3(nm, arg1, arg2, arg3)		\
+  long int _tmp3 = (long int) (arg3);			\
+  LOAD_ARGS_2 (nm, arg1, arg2)				\
+  register long int _arg3 __asm__ ("r2") = _tmp3;
+
+#define LOAD_ARGS_4(nm, arg1, arg2, arg3, arg4)		\
+  long int _tmp4 = (long int) (arg4);			\
+  LOAD_ARGS_3 (nm, arg1, arg2, arg3)			\
+  register long int _arg4 __asm__ ("r3") = _tmp4;
+
+# define LOAD_ARGS_5(nm, arg1, arg2, arg3, arg4, arg5)	\
+  long int _tmp5 = (long int) (arg5);			\
+  LOAD_ARGS_4 (nm, arg1, arg2, arg3, arg4)		\
+  register long int _arg5 __asm__ ("r4") = _tmp5;
+
+# define LOAD_ARGS_6(nm,  arg1, arg2, arg3, arg4, arg5, arg6)\
+  long int _tmp6 = (long int) (arg6);			\
+  LOAD_ARGS_5 (nm, arg1, arg2, arg3, arg4, arg5)	\
+  register long int _arg6 __asm__ ("r5") = _tmp6;
+
+# define LOAD_ARGS_7(nm, arg1, arg2, arg3, arg4, arg5, arg6, arg7)\
+  long int _tmp7 = (long int) (arg7);			\
+  LOAD_ARGS_6 (nm, arg1, arg2, arg3, arg4, arg5, arg6)	\
+  register long int _arg7 __asm__ ("r6") = _tmp7;
+
+/* Pointer mangling not yet supported.  */
+# define PTR_MANGLE(var) (void) (var)
+# define PTR_DEMANGLE(var) (void) (var)
+
+# undef HAVE_INTERNAL_BRK_ADDR_SYMBOL
+# define HAVE_INTERNAL_BRK_ADDR_SYMBOL  1
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* linux/arc/sysdep.h */
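
On the C side, glibc-internal callers reach the trap through INTERNAL_SYSCALL. A hedged usage sketch, only meaningful with this header in scope inside glibc (the wrapper fragment is illustrative, not from this commit):

    /* Expands to INTERNAL_SYSCALL_NCS (__NR_kill, 2, pid, sig):
       pid lands in r0, sig in r1, the number in r8, then trap_s 0.
       A raw result in -4095..-1 signals an error.  */
    long int ret = INTERNAL_SYSCALL (kill, 2, pid, sig);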
lib/libc/glibc/sysdeps/unix/sysv/linux/csky/sysdep.h
@@ -0,0 +1,515 @@
+/* Assembly macros for C-SKY.
+   Copyright (C) 2018-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#ifndef _LINUX_CSKY_SYSDEP_H
+#define _LINUX_CSKY_SYSDEP_H 1
+
+/* There is some commonality.  */
+#include <sysdeps/unix/sysv/linux/generic/sysdep.h>
+#include <sysdeps/unix/sysv/linux/sysdep.h>
+#include <sysdeps/csky/sysdep.h>
+
+/* Defines RTLD_PRIVATE_ERRNO and USE_DL_SYSINFO.  */
+#include <dl-sysdep.h>
+
+#include <tls.h>
+
+/* In order to get __set_errno() definition in INLINE_SYSCALL.  */
+#ifndef __ASSEMBLER__
+# include <errno.h>
+#endif
+
+#undef SYS_ify
+#define SYS_ify(syscall_name)  (__NR_##syscall_name)
+
+#ifdef __ASSEMBLER__
+/* Linux uses a negative return value to indicate syscall errors,
+   unlike most Unices, which use the condition codes' carry flag.
+
+   Since version 2.1 the return value of a system call might be
+   negative even if the call succeeded.  E.g., the `lseek' system call
+   might return a large offset.  Therefore we must no longer test
+   for < 0, but instead test for a real error by making sure the value
+   in R0 is a real error number.  Linus said he will make sure that no
+   syscall returns a value in -1 .. -4095 as a valid result, so we can
+   safely test with -4095.  */
+
+# undef PSEUDO
+# define PSEUDO(name, syscall_name, args)	\
+  .text;					\
+  ENTRY (name);					\
+  DO_CALL (syscall_name, args);
+
+# define GETGB				\
+	grs	t0, .Lgetpc;		\
+.Lgetpc:				\
+	lrw	gb, .Lgetpc@GOTPC;	\
+	addu	gb, t0;
+
+# if IS_IN (libc)
+#  ifdef __PIC__
+#   define PSEUDO_RET			\
+	btsti	a0, 31;			\
+	bf	1f;			\
+	subi	sp, 8;			\
+	st.w	lr, (sp);		\
+	st.w	gb, (sp, 4);		\
+	GETGB;				\
+	lrw	a2, SYSCALL_ERROR@PLT;	\
+	add	a2, gb;			\
+	ld.w	a2, (a2);		\
+	jsr	a2;			\
+	ld.w	lr, (sp);		\
+	ld.w	gb, (sp, 4);		\
+	addi	sp, 8;			\
+1:					\
+	rts
+#  else
+#   define PSEUDO_RET			\
+	btsti	a0, 31;			\
+	bf	1f;			\
+	jmpi	SYSCALL_ERROR;		\
+1:					\
+	rts
+#  endif
+# else
+#  ifdef __PIC__
+#   define PSEUDO_RET			\
+	btsti	a0, 31;			\
+	bf	1f;			\
+	subi	sp, 8;			\
+	st.w	lr, (sp);		\
+	st.w	gb, (sp, 4);		\
+	GETGB;				\
+	bsr	SYSCALL_ERROR;		\
+	ld.w	lr, (sp);		\
+	ld.w	gb, (sp, 4);		\
+	addi	sp, 8;			\
+1:					\
+	rts
+#  else
+#   define PSEUDO_RET			\
+	btsti	a0, 31;			\
+	bt	SYSCALL_ERROR;		\
+	rts
+#  endif
+# endif
+
+# undef ret
+# define ret PSEUDO_RET
+
+# undef PSEUDO_END
+# define PSEUDO_END(name)		\
+  .align 4;				\
+  SYSCALL_ERROR_HANDLER;		\
+  END (name)
+
+# undef PSEUDO_NOERRNO
+# define PSEUDO_NOERRNO(name, syscall_name, args)	\
+  .text;						\
+  ENTRY (name);						\
+  DO_CALL (syscall_name, args)
+
+# define PSEUDO_RET_NOERRNO rts
+
+# undef ret_NOERRNO
+# define ret_NOERRNO PSEUDO_RET_NOERRNO
+
+# undef PSEUDO_END_NOERRNO
+# define PSEUDO_END_NOERRNO(name) END (name)
+
+/* The function has to return the error code.  */
+# undef PSEUDO_ERRVAL
+# define PSEUDO_ERRVAL(name, syscall_name, args)	\
+  .text;						\
+  ENTRY (name)						\
+  DO_CALL (syscall_name, args);				\
+  not	a0;						\
+  addi	a0, 1
+
+# undef PSEUDO_END_ERRVAL
+# define PSEUDO_END_ERRVAL(name) END (name)
+
+# define ret_ERRVAL rts
+
+# if !IS_IN (libc)
+#  define SYSCALL_ERROR __local_syscall_error
+#  if RTLD_PRIVATE_ERRNO
+#   ifdef __PIC__
+#    define SYSCALL_ERROR_HANDLER	\
+__local_syscall_error:			\
+	lrw	a1, rtld_errno@PLT; 	\
+	addu	a1, gb;			\
+	ldw	a1, (a1);		\
+	rsubi	a0, 0;			\
+	stw	a0, (a1);		\
+	bmaski	a0, 0;			\
+	rts
+#   else /* __PIC__ */
+#    define SYSCALL_ERROR_HANDLER	\
+__local_syscall_error:			\
+	lrw	a1, rtld_errno;		\
+	rsubi	a0, 0;			\
+	stw	a0, (a1);		\
+	bmaski	a0, 0;			\
+	rts
+#   endif /* __PIC__ */
+#  else /* !RTLD_PRIVATE_ERRNO */
+#   ifdef __PIC__
+#    define SYSCALL_ERROR_HANDLER		\
+__local_syscall_error:				\
+	subi	sp, 8;				\
+	stw	a0, (sp, 0);			\
+	stw	r15, (sp, 4);			\
+	lrw	a1, __errno_location@PLT;	\
+	add	a1, gb;				\
+	ldw	a1, (a1);			\
+	jsr	a1;				\
+	ldw	a1, (sp, 0); /* load errno*/	\
+	ldw	r15, (sp, 4);			\
+	addi	sp, 8;				\
+	movi	a2, 0;				\
+	rsub	a1, a1, a2;			\
+	stw	a1, (a0);			\
+	bmaski	a0, 0;				\
+	rts
+#    else
+#     define SYSCALL_ERROR_HANDLER 		\
+__local_syscall_error:				\
+	subi	sp, 8;				\
+	stw	a0, (sp, 0);			\
+	stw	r15, (sp, 4);			\
+	lrw	a1, __errno_location;		\
+	jsr	a1;				\
+	ldw	a1, (sp, 0); /* load errno */	\
+	ldw	r15, (sp, 4);			\
+	addi	sp, 8;				\
+	movi	a2, 0;				\
+	rsub	a1, a1, a2;			\
+	stw	a1, (a0);			\
+	bmaski	a0, 0;				\
+	rts
+#   endif /* __PIC__ */
+#  endif /* RTLD_PRIVATE_ERRNO */
+# else
+#  define SYSCALL_ERROR_HANDLER  /* Nothing here; code in sysdep.S is used.  */
+#  define SYSCALL_ERROR __syscall_error
+# endif /* IS_IN (libc) */
+
+/* define DO_CALL */
+# undef DO_CALL
+# define DO_CALL(syscall_name, args)	\
+  DOARGS_##args;			\
+  lrw	r7, SYS_ify(syscall_name);	\
+  trap	0;				\
+  UNDOARGS_##args
+
+# undef  DOARGS_0
+# define DOARGS_0			\
+	subi	sp, 8;			\
+	cfi_adjust_cfa_offset (8);	\
+	stw	r7, (sp, 0);		\
+	cfi_rel_offset (r7, 0);
+
+# undef  DOARGS_1
+# define DOARGS_1 DOARGS_0
+# undef  DOARGS_2
+# define DOARGS_2 DOARGS_0
+# undef  DOARGS_3
+# define DOARGS_3 DOARGS_0
+# undef  DOARGS_4
+# define DOARGS_4 DOARGS_0
+# undef  DOARGS_5
+# define DOARGS_5			\
+	subi	sp, 8;			\
+	cfi_adjust_cfa_offset (8);	\
+	stw	r7, (sp, 0);		\
+	cfi_rel_offset (7, 0);		\
+	stw	r4, (sp, 4);		\
+	cfi_rel_offset (4, 4);		\
+	ldw	r4, (sp, 8)
+# undef  DOARGS_6
+# define DOARGS_6			\
+	subi	sp, 16;			\
+	cfi_adjust_cfa_offset (16);	\
+	stw	r7, (sp, 0);		\
+	cfi_rel_offset (7, 0);		\
+	stw	r4, (sp, 4);		\
+	cfi_rel_offset (4, 4);		\
+	stw	r5, (sp, 8);		\
+	cfi_rel_offset (5, 8);		\
+	ldw	r4, (sp, 16);		\
+	ldw	r5, (sp, 20)
+
+# undef  UNDOARGS_0
+# define UNDOARGS_0 \
+  ldw  r7, (sp, 0); \
+  cfi_restore (r7); \
+  addi sp, 8;   \
+  cfi_adjust_cfa_offset (-8);
+
+# undef  UNDOARGS_1
+# define UNDOARGS_1 UNDOARGS_0
+# undef  UNDOARGS_2
+# define UNDOARGS_2 UNDOARGS_0
+# undef  UNDOARGS_3
+# define UNDOARGS_3 UNDOARGS_0
+# undef  UNDOARGS_4
+# define UNDOARGS_4 UNDOARGS_0
+# undef  UNDOARGS_5
+# define UNDOARGS_5			\
+	ldw	r7, (sp, 0);		\
+	cfi_restore (r4);		\
+	ldw	r4, (sp, 4);		\
+	cfi_restore (r4);		\
+	addi	sp, 8;			\
+	cfi_adjust_cfa_offset (-8);
+
+# undef  UNDOARGS_6
+# define UNDOARGS_6			\
+	ldw	r7, (sp, 0);		\
+	cfi_restore (r7);		\
+	ldw	r4, (sp, 4);		\
+	cfi_restore (r4);		\
+	ldw	r5, (sp, 8);		\
+	cfi_restore (r5);		\
+	addi	sp, 16;			\
+	cfi_adjust_cfa_offset (-16);
+
+#else /* not __ASSEMBLER__ */
+
+# undef INTERNAL_SYSCALL_RAW
+#  define INTERNAL_SYSCALL_RAW0(name, dummy...)				\
+  ({unsigned int __sys_result;						\
+     {									\
+       register int _a1 __asm__ ("a0"), _nr __asm__ ("r7");		\
+       _nr = name;							\
+       __asm__ __volatile__ ("trap  0 \n\t"				\
+			     : "=r" (_a1)				\
+			     : "r" (_nr)				\
+			     : "memory");				\
+	       __sys_result = _a1;					\
+     }									\
+     (int) __sys_result; })
+
+#  define INTERNAL_SYSCALL_RAW1(name, arg1)				\
+  ({unsigned int __sys_result;						\
+    register int _tmp_arg1 = (int)(arg1);				\
+     {									\
+       register int _a1 __asm__ ("a0"), _nr __asm__ ("r7");		\
+       _a1 = _tmp_arg1;							\
+       _nr = name;							\
+       __asm__ __volatile__ ("trap  0 \n\t"				\
+			     : "=r" (_a1)				\
+			     : "r" (_nr), "r" (_a1)			\
+			     : "memory");				\
+	       __sys_result = _a1;					\
+     }									\
+     (int) __sys_result; })
+
+#  define INTERNAL_SYSCALL_RAW2(name, arg1, arg2)			\
+  ({unsigned int __sys_result;						\
+    register int _tmp_arg1 = (int)(arg1), _tmp_arg2 = (int)(arg2);	\
+     {									\
+       register int _nr __asm__ ("r7");					\
+       register int _a1 __asm__ ("a0"), _a2 __asm__ ("a1");		\
+       _a1 = _tmp_arg1, _a2 = _tmp_arg2;				\
+       _nr = name;							\
+       __asm__ __volatile__ ("trap  0 \n\t"				\
+			     : "=r" (_a1)				\
+			     : "r" (_nr), "r" (_a1), "r" (_a2)		\
+			     : "memory");				\
+	       __sys_result = _a1;					\
+     }									\
+     (int) __sys_result; })
+
+#  define INTERNAL_SYSCALL_RAW3(name, arg1, arg2, arg3)			\
+  ({unsigned int __sys_result;						\
+    register int _tmp_arg1 = (int)(arg1), _tmp_arg2 = (int)(arg2);	\
+    register int _tmp_arg3 = (int)(arg3);				\
+     {									\
+       register int _nr __asm__ ("r7");					\
+       register int _a1 __asm__ ("a0"), _a2 __asm__ ("a1");		\
+       register int _a3 __asm__ ("a2");					\
+       _a1 = _tmp_arg1;							\
+       _a2 = _tmp_arg2;							\
+       _a3 = _tmp_arg3;							\
+       _nr = name;							\
+       __asm__ __volatile__ ("trap  0 \n\t"				\
+			     : "=r" (_a1)				\
+			     : "r" (_nr), "r" (_a1), "r" (_a2),		\
+			       "r" (_a3)				\
+			     : "memory");				\
+	       __sys_result = _a1;					\
+     }									\
+     (int) __sys_result; })
+
+#  define INTERNAL_SYSCALL_RAW4(name, arg1, arg2, arg3, arg4)		\
+  ({unsigned int __sys_result;						\
+    register int _tmp_arg1 = (int)(arg1), _tmp_arg2 = (int)(arg2);	\
+    register int _tmp_arg3 = (int)(arg3), _tmp_arg4 = (int)(arg4);	\
+     {									\
+       register int _nr __asm__ ("r7");					\
+       register int _a1 __asm__ ("a0"), _a2 __asm__ ("a1");		\
+       register int _a3 __asm__ ("a2"), _a4 __asm__ ("a3");		\
+       _a1 = _tmp_arg1, _a2 = _tmp_arg2, _a3 = _tmp_arg3;		\
+       _a4 = _tmp_arg4;							\
+       _nr = name;							\
+       __asm__ __volatile__ ("trap  0 \n\t"				\
+			     : "=r" (_a1)				\
+			     : "r" (_nr), "r" (_a1), "r" (_a2),		\
+			       "r" (_a3), "r" (_a4)			\
+			     : "memory");				\
+	       __sys_result = _a1;					\
+     }									\
+     (int) __sys_result; })
+
+#  define INTERNAL_SYSCALL_RAW5(name, arg1, arg2, arg3, arg4,		\
+			      arg5)					\
+  ({unsigned int __sys_result;						\
+    register int _tmp_arg1 = (int)(arg1), _tmp_arg2 = (int)(arg2);	\
+    register int _tmp_arg3 = (int)(arg3), _tmp_arg4 = (int)(arg4);	\
+    register int _tmp_arg5 = (int)(arg5);				\
+     {									\
+       register int _nr __asm__ ("r7");					\
+       register int _a1 __asm__ ("a0"), _a2 __asm__ ("a1");		\
+       register int _a3 __asm__ ("a2"), _a4 __asm__ ("a3");		\
+       register int _a5 __asm__ ("r4");					\
+       _a1 = _tmp_arg1, _a2 = _tmp_arg2, _a3 = _tmp_arg3;		\
+       _a4 = _tmp_arg4, _a5 = _tmp_arg5;				\
+       _nr = name;							\
+       __asm__ __volatile__ ("trap  0 \n\t"				\
+			     : "=r" (_a1)				\
+			     : "r" (_nr), "r" (_a1), "r" (_a2),		\
+			       "r" (_a3), "r" (_a4), "r" (_a5)		\
+			     : "memory");				\
+	       __sys_result = _a1;					\
+     }									\
+     (int) __sys_result; })
+
+#  define INTERNAL_SYSCALL_RAW6(name, arg1, arg2, arg3, arg4,		\
+			      arg5, arg6)				\
+  ({unsigned int __sys_result;						\
+    register int _tmp_arg1 = (int)(arg1), _tmp_arg2 = (int)(arg2);	\
+    register int _tmp_arg3 = (int)(arg3), _tmp_arg4 = (int)(arg4);	\
+    register int _tmp_arg5 = (int)(arg5), _tmp_arg6 = (int)(arg6);	\
+     {									\
+       register int _nr __asm__ ("r7");					\
+       register int _a1 __asm__ ("a0"), _a2 __asm__ ("a1");		\
+       register int _a3 __asm__ ("a2"), _a4 __asm__ ("a3");		\
+       register int _a5 __asm__ ("r4"), _a6 __asm__ ("r5");		\
+       _a1 = _tmp_arg1, _a2 = _tmp_arg2, _a3 = _tmp_arg3;		\
+       _a4 = _tmp_arg4, _a5 = _tmp_arg5, _a6 = _tmp_arg6;		\
+       _nr = name;							\
+       __asm__ __volatile__ ("trap  0 \n\t"				\
+			     : "=r" (_a1)				\
+			     : "r" (_nr), "r" (_a1), "r" (_a2),		\
+			       "r" (_a3), "r" (_a4), "r" (_a5),		\
+			       "r" (_a6)				\
+			     : "memory");				\
+	       __sys_result = _a1;					\
+     }									\
+     (int) __sys_result; })
+
+#  define INTERNAL_SYSCALL_RAW7(name, arg1, arg2, arg3, arg4,		\
+			      arg5, arg6, arg7)				\
+  ({unsigned int __sys_result;						\
+    register int _tmp_arg1 = (int)(arg1), _tmp_arg2 = (int)(arg2);	\
+    register int _tmp_arg3 = (int)(arg3), _tmp_arg4 = (int)(arg4);	\
+    register int _tmp_arg5 = (int)(arg5), _tmp_arg6 = (int)(arg6);	\
+    register int _tmp_arg7 = (int)(arg7);				\
+     {									\
+       register int _nr __asm__ ("r7");					\
+       register int _a1 __asm__ ("a0"), _a2 __asm__ ("a1");		\
+       register int _a3 __asm__ ("a2"), _a4 __asm__ ("a3");		\
+       register int _a5 __asm__ ("r4"), _a6 __asm__ ("r5");		\
+       register int _a7 __asm__ ("r6");					\
+       _a1 = _tmp_arg1, _a2 = _tmp_arg2, _a3 = _tmp_arg3;		\
+       _a4 = _tmp_arg4, _a5 = _tmp_arg5, _a6 = _tmp_arg6;		\
+       _a7 = _tmp_arg7;							\
+       _nr = name;							\
+       __asm__ __volatile__ ("trap  0 \n\t"				\
+			     : "=r" (_a1)				\
+			     : "r" (_nr), "r" (_a1), "r" (_a2),		\
+			       "r" (_a3), "r" (_a4), "r" (_a5),		\
+			       "r" (_a6), "r" (_a7)			\
+			     : "memory");				\
+	       __sys_result = _a1;					\
+     }									\
+     (int) __sys_result; })
+
+# undef INTERNAL_SYSCALL
+# define INTERNAL_SYSCALL(name, nr, args...)			\
+  INTERNAL_SYSCALL_RAW##nr(SYS_ify(name), args)
+
+# undef INTERNAL_SYSCALL_NCS
+# define INTERNAL_SYSCALL_NCS(number, nr, args...)		\
+  INTERNAL_SYSCALL_RAW##nr (number, args)
+
+#undef HAVE_INTERNAL_BRK_ADDR_SYMBOL
+#define HAVE_INTERNAL_BRK_ADDR_SYMBOL 1
+
+#endif /* __ASSEMBLER__ */
+
+/* Pointer mangling support.  */
+#if (IS_IN (rtld) \
+     || (!defined SHARED && (IS_IN (libc) || IS_IN (libpthread))))
+# ifdef __ASSEMBLER__
+#  define PTR_MANGLE(dst, src, guard)			\
+	grs	t0, 1f;					\
+1:							\
+	lrw	guard, 1b@GOTPC;			\
+	addu	t0, guard;				\
+	lrw	guard, __pointer_chk_guard_local@GOT;	\
+	ldr.w	guard, (t0, guard << 0);		\
+	ldw	guard, (guard, 0);			\
+	xor	dst, src, guard;
+#  define PTR_DEMANGLE(dst, src, guard) PTR_MANGLE (dst, src, guard)
+#  define PTR_MANGLE2(dst, src, guard) \
+	xor	dst, src, guard
+#  define PTR_DEMANGLE2(dst, src, guard) PTR_MANGLE2 (dst, src, guard)
+# else
+extern uintptr_t __pointer_chk_guard_local;
+#  define PTR_MANGLE(var) \
+  (var) = (__typeof (var)) ((uintptr_t) (var) ^ __pointer_chk_guard_local)
+#  define PTR_DEMANGLE(var) PTR_MANGLE (var)
+# endif
+#else
+# ifdef __ASSEMBLER__
+#  define PTR_MANGLE(dst, src, guard)		\
+	grs	t0, 1f;				\
+1:						\
+	lrw	guard, 1b@GOTPC;		\
+	addu	t0, guard;			\
+	lrw	guard, __pointer_chk_guard@GOT;	\
+	ldr.w	guard, (t0, guard << 0);	\
+	ldw	guard, (guard, 0);		\
+	xor	dst, src, guard;
+#  define PTR_DEMANGLE(dst, src, guard) PTR_MANGLE (dst, src, guard)
+#  define PTR_MANGLE2(dst, src, guard) \
+	xor	dst, src, guard
+#  define PTR_DEMANGLE2(dst, src, guard) PTR_MANGLE2 (dst, src, guard)
+# else
+extern uintptr_t __pointer_chk_guard;
+#  define PTR_MANGLE(var) \
+  (var) = (__typeof (var)) ((uintptr_t) (var) ^ __pointer_chk_guard)
+#  define PTR_DEMANGLE(var) PTR_MANGLE (var)
+# endif
+#endif
+
+#endif /* linux/csky/sysdep.h */
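
The error convention spelled out in the long comment above (raw results in -4095..-1 are negated errno codes) distills to a single unsigned comparison. A minimal standalone C sketch, independent of this header:

    #include <stdbool.h>

    /* True iff a raw Linux syscall return value encodes an error:
       the kernel reserves (unsigned long) -4095 .. -1 for -errno.  */
    static inline bool
    syscall_failed (unsigned long raw)
    {
      return raw > -4096UL;
    }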
lib/libc/glibc/sysdeps/unix/sysv/linux/mips/mips64/sysdep.h
@@ -0,0 +1,307 @@
+/* Copyright (C) 2000-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#ifndef _LINUX_MIPS_SYSDEP_H
+#define _LINUX_MIPS_SYSDEP_H 1
+
+/* There is some commonality.  */
+#include <sysdeps/unix/sysv/linux/mips/sysdep.h>
+#include <sysdeps/unix/sysv/linux/sysdep.h>
+#include <sysdeps/unix/mips/mips64/sysdep.h>
+
+#include <tls.h>
+
+/* For Linux we can use the system call table in the header file
+	/usr/include/asm/unistd.h
+   of the kernel.  But these symbols do not follow the SYS_* syntax
+   so we have to redefine the `SYS_ify' macro here.  */
+#undef SYS_ify
+#define SYS_ify(syscall_name)	__NR_##syscall_name
+
+#ifdef __ASSEMBLER__
+
+/* We don't want the label for the error handler to be visible in the symbol
+   table when we define it here.  */
+# undef SYSCALL_ERROR_LABEL
+# define SYSCALL_ERROR_LABEL 99b
+
+#else   /* ! __ASSEMBLER__ */
+
+#undef HAVE_INTERNAL_BRK_ADDR_SYMBOL
+#define HAVE_INTERNAL_BRK_ADDR_SYMBOL 1
+
+#if _MIPS_SIM == _ABIN32
+/* Convert X to a long long, without losing any bits if it is one
+   already, and without provoking a warning if it is a 32-bit pointer.  */
+# define ARGIFY(X) ((long long int) (__typeof__ ((X) - (X))) (X))
+typedef long long int __syscall_arg_t;
+#else
+# define ARGIFY(X) ((long int) (X))
+typedef long int __syscall_arg_t;
+#endif
+
+/* Note that the original Linux syscall restart convention required the
+   instruction immediately preceding SYSCALL to initialize $v0 with the
+   syscall number.  Then if a restart triggered, $v0 would have been
+   clobbered by the interrupted syscall, and needed to be reinitialized.
+   The kernel would decrement the PC by 4 before switching back to the
+   user mode so that $v0 had been reloaded before SYSCALL was executed
+   again.  This implied the place $v0 was loaded from must have been
+   preserved across a syscall, e.g. an immediate, static register, stack
+   slot, etc.
+
+   The convention was relaxed in Linux with a change applied to the kernel
+   GIT repository as commit 96187fb0bc30cd7919759d371d810e928048249d, that
+   first appeared in the 2.6.36 release.  Since then the kernel has had
+   code that reloads $v0 upon syscall restart and resumes right at the
+   SYSCALL instruction, so no special arrangement is needed anymore.
+
+   For backwards compatibility with existing kernel binaries we support
+   the old convention by choosing the instruction preceding SYSCALL
+   carefully.  This also means we have to force a 32-bit encoding of the
+   microMIPS MOVE instruction if one is used.  */
+
+#ifdef __mips_micromips
+# define MOVE32 "move32"
+#else
+# define MOVE32 "move"
+#endif
+
+#undef INTERNAL_SYSCALL
+#define INTERNAL_SYSCALL(name, nr, args...)			\
+	internal_syscall##nr ("li\t%0, %2\t\t\t# " #name "\n\t",	\
+			      "IK" (SYS_ify (name)),			\
+			      0, args)
+
+#undef INTERNAL_SYSCALL_NCS
+#define INTERNAL_SYSCALL_NCS(number, nr, args...)			\
+	internal_syscall##nr (MOVE32 "\t%0, %2\n\t",			\
+			      "r" (__s0),				\
+			      number, args)
+
+#define internal_syscall0(v0_init, input, number, dummy...)	\
+({									\
+	long int _sys_result;						\
+									\
+	{								\
+	register __syscall_arg_t __s0 asm ("$16") __attribute__ ((unused))\
+	  = (number);							\
+	register __syscall_arg_t __v0 asm ("$2");			\
+	register __syscall_arg_t __a3 asm ("$7");			\
+	__asm__ volatile (						\
+	".set\tnoreorder\n\t"						\
+	v0_init								\
+	"syscall\n\t"							\
+	".set reorder"							\
+	: "=r" (__v0), "=r" (__a3)					\
+	: input								\
+	: __SYSCALL_CLOBBERS);						\
+	_sys_result = __a3 != 0 ? -__v0 : __v0;				\
+	}								\
+	_sys_result;							\
+})
+
+#define internal_syscall1(v0_init, input, number, arg1)		\
+({									\
+	long int _sys_result;						\
+									\
+	{								\
+	__syscall_arg_t _arg1 = ARGIFY (arg1);				\
+	register __syscall_arg_t __s0 asm ("$16") __attribute__ ((unused))\
+	  = (number);							\
+	register __syscall_arg_t __v0 asm ("$2");			\
+	register __syscall_arg_t __a0 asm ("$4") = _arg1;		\
+	register __syscall_arg_t __a3 asm ("$7");			\
+	__asm__ volatile (						\
+	".set\tnoreorder\n\t"						\
+	v0_init								\
+	"syscall\n\t"							\
+	".set reorder"							\
+	: "=r" (__v0), "=r" (__a3)					\
+	: input, "r" (__a0)						\
+	: __SYSCALL_CLOBBERS);						\
+	_sys_result = __a3 != 0 ? -__v0 : __v0;				\
+	}								\
+	_sys_result;							\
+})
+
+#define internal_syscall2(v0_init, input, number, arg1, arg2)	\
+({									\
+	long int _sys_result;						\
+									\
+	{								\
+	__syscall_arg_t _arg1 = ARGIFY (arg1);				\
+	__syscall_arg_t _arg2 = ARGIFY (arg2);				\
+	register __syscall_arg_t __s0 asm ("$16") __attribute__ ((unused))\
+	  = (number);							\
+	register __syscall_arg_t __v0 asm ("$2");			\
+	register __syscall_arg_t __a0 asm ("$4") = _arg1;		\
+	register __syscall_arg_t __a1 asm ("$5") = _arg2;		\
+	register __syscall_arg_t __a3 asm ("$7");			\
+	__asm__ volatile (						\
+	".set\tnoreorder\n\t"						\
+	v0_init								\
+	"syscall\n\t"							\
+	".set\treorder"							\
+	: "=r" (__v0), "=r" (__a3)					\
+	: input, "r" (__a0), "r" (__a1)					\
+	: __SYSCALL_CLOBBERS);						\
+	_sys_result = __a3 != 0 ? -__v0 : __v0;				\
+	}								\
+	_sys_result;							\
+})
+
+#define internal_syscall3(v0_init, input, number, arg1, arg2, arg3)	\
+({									\
+	long int _sys_result;						\
+									\
+	{								\
+	__syscall_arg_t _arg1 = ARGIFY (arg1);				\
+	__syscall_arg_t _arg2 = ARGIFY (arg2);				\
+	__syscall_arg_t _arg3 = ARGIFY (arg3);				\
+	register __syscall_arg_t __s0 asm ("$16") __attribute__ ((unused))\
+	  = (number);							\
+	register __syscall_arg_t __v0 asm ("$2");			\
+	register __syscall_arg_t __a0 asm ("$4") = _arg1;		\
+	register __syscall_arg_t __a1 asm ("$5") = _arg2;		\
+	register __syscall_arg_t __a2 asm ("$6") = _arg3;		\
+	register __syscall_arg_t __a3 asm ("$7");			\
+	__asm__ volatile (						\
+	".set\tnoreorder\n\t"						\
+	v0_init								\
+	"syscall\n\t"							\
+	".set\treorder"							\
+	: "=r" (__v0), "=r" (__a3)					\
+	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
+	: __SYSCALL_CLOBBERS);						\
+	_sys_result = __a3 != 0 ? -__v0 : __v0;				\
+	}								\
+	_sys_result;							\
+})
+
+#define internal_syscall4(v0_init, input, number, arg1, arg2, arg3, 	\
+			  arg4)						\
+({									\
+	long int _sys_result;						\
+									\
+	{								\
+	__syscall_arg_t _arg1 = ARGIFY (arg1);				\
+	__syscall_arg_t _arg2 = ARGIFY (arg2);				\
+	__syscall_arg_t _arg3 = ARGIFY (arg3);				\
+	__syscall_arg_t _arg4 = ARGIFY (arg4);				\
+	register __syscall_arg_t __s0 asm ("$16") __attribute__ ((unused))\
+	  = (number);							\
+	register __syscall_arg_t __v0 asm ("$2");			\
+	register __syscall_arg_t __a0 asm ("$4") = _arg1;		\
+	register __syscall_arg_t __a1 asm ("$5") = _arg2;		\
+	register __syscall_arg_t __a2 asm ("$6") = _arg3;		\
+	register __syscall_arg_t __a3 asm ("$7") = _arg4;		\
+	__asm__ volatile (						\
+	".set\tnoreorder\n\t"						\
+	v0_init								\
+	"syscall\n\t"							\
+	".set\treorder"							\
+	: "=r" (__v0), "+r" (__a3)					\
+	: input, "r" (__a0), "r" (__a1), "r" (__a2)			\
+	: __SYSCALL_CLOBBERS);						\
+	_sys_result = __a3 != 0 ? -__v0 : __v0;				\
+	}								\
+	_sys_result;							\
+})
+
+#define internal_syscall5(v0_init, input, number, arg1, arg2, arg3, 	\
+			  arg4, arg5)					\
+({									\
+	long int _sys_result;						\
+									\
+	{								\
+	__syscall_arg_t _arg1 = ARGIFY (arg1);				\
+	__syscall_arg_t _arg2 = ARGIFY (arg2);				\
+	__syscall_arg_t _arg3 = ARGIFY (arg3);				\
+	__syscall_arg_t _arg4 = ARGIFY (arg4);				\
+	__syscall_arg_t _arg5 = ARGIFY (arg5);				\
+	register __syscall_arg_t __s0 asm ("$16") __attribute__ ((unused))\
+	  = (number);							\
+	register __syscall_arg_t __v0 asm ("$2");			\
+	register __syscall_arg_t __a0 asm ("$4") = _arg1;		\
+	register __syscall_arg_t __a1 asm ("$5") = _arg2;		\
+	register __syscall_arg_t __a2 asm ("$6") = _arg3;		\
+	register __syscall_arg_t __a3 asm ("$7") = _arg4;		\
+	register __syscall_arg_t __a4 asm ("$8") = _arg5;		\
+	__asm__ volatile (						\
+	".set\tnoreorder\n\t"						\
+	v0_init								\
+	"syscall\n\t"							\
+	".set\treorder"							\
+	: "=r" (__v0), "+r" (__a3)					\
+	: input, "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4)		\
+	: __SYSCALL_CLOBBERS);						\
+	_sys_result = __a3 != 0 ? -__v0 : __v0;				\
+	}								\
+	_sys_result;							\
+})
+
+#define internal_syscall6(v0_init, input, number, arg1, arg2, arg3, 	\
+			  arg4, arg5, arg6)				\
+({									\
+	long int _sys_result;						\
+									\
+	{								\
+	__syscall_arg_t _arg1 = ARGIFY (arg1);				\
+	__syscall_arg_t _arg2 = ARGIFY (arg2);				\
+	__syscall_arg_t _arg3 = ARGIFY (arg3);				\
+	__syscall_arg_t _arg4 = ARGIFY (arg4);				\
+	__syscall_arg_t _arg5 = ARGIFY (arg5);				\
+	__syscall_arg_t _arg6 = ARGIFY (arg6);				\
+	register __syscall_arg_t __s0 asm ("$16") __attribute__ ((unused))\
+	  = (number);							\
+	register __syscall_arg_t __v0 asm ("$2");			\
+	register __syscall_arg_t __a0 asm ("$4") = _arg1;		\
+	register __syscall_arg_t __a1 asm ("$5") = _arg2;		\
+	register __syscall_arg_t __a2 asm ("$6") = _arg3;		\
+	register __syscall_arg_t __a3 asm ("$7") = _arg4;		\
+	register __syscall_arg_t __a4 asm ("$8") = _arg5;		\
+	register __syscall_arg_t __a5 asm ("$9") = _arg6;		\
+	__asm__ volatile (						\
+	".set\tnoreorder\n\t"						\
+	v0_init								\
+	"syscall\n\t"							\
+	".set\treorder"							\
+	: "=r" (__v0), "+r" (__a3)					\
+	: input, "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4),	\
+	  "r" (__a5)							\
+	: __SYSCALL_CLOBBERS);						\
+	_sys_result = __a3 != 0 ? -__v0 : __v0;				\
+	}								\
+	_sys_result;							\
+})
+
+#if __mips_isa_rev >= 6
+# define __SYSCALL_CLOBBERS "$1", "$3", "$10", "$11", "$12", "$13", \
+	 "$14", "$15", "$24", "$25", "memory"
+#else
+# define __SYSCALL_CLOBBERS "$1", "$3", "$10", "$11", "$12", "$13", \
+	 "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+#endif
+
+#endif /* __ASSEMBLER__ */
+
+/* Pointer mangling is not yet supported for MIPS.  */
+#define PTR_MANGLE(var) (void) (var)
+#define PTR_DEMANGLE(var) (void) (var)
+
+#endif /* linux/mips/sysdep.h */
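
The ARGIFY comment is easiest to see in isolation: the intermediate cast through __typeof__ ((X) - (X)) picks ptrdiff_t for pointers (avoiding a pointer-to-wider-integer warning on n32) and keeps unsignedness for integers (so they zero-extend). A hedged standalone illustration, reproducing the macro outside glibc:

    /* Same shape as the n32 ARGIFY above, for a standalone test.  */
    #define ARGIFY(X) ((long long int) (__typeof__ ((X) - (X))) (X))

    /* flags self-subtracts to unsigned int, so the value zero-extends.  */
    long long widen_flags (unsigned int flags) { return ARGIFY (flags); }

    /* p self-subtracts to ptrdiff_t, so there is no pointer-size warning.  */
    long long widen_ptr (char *p) { return ARGIFY (p); }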
lib/libc/glibc/sysdeps/unix/sysv/linux/mips/sysdep.h
@@ -0,0 +1,30 @@
+/* Syscall definitions, Linux MIPS generic version.
+   Copyright (C) 2019-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sgidefs.h>
+
+#define VDSO_NAME  "LINUX_2.6"
+#define VDSO_HASH  61765110
+
+/* List of system calls which are supported as vsyscalls.  */
+#define HAVE_CLOCK_GETTIME_VSYSCALL     "__vdso_clock_gettime"
+#if _MIPS_SIM != _ABI64
+# define HAVE_CLOCK_GETTIME64_VSYSCALL  "__vdso_clock_gettime64"
+#endif
+#define HAVE_GETTIMEOFDAY_VSYSCALL      "__vdso_gettimeofday"
+#define HAVE_CLOCK_GETRES_VSYSCALL      "__vdso_clock_getres"
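
These HAVE_*_VSYSCALL strings name symbols that glibc looks up, under version node "LINUX_2.6", in the vDSO the kernel maps into every process. A runnable sketch of locating that mapping from ordinary user code (illustrative; glibc's own lookup machinery lives elsewhere):

    #include <stdio.h>
    #include <sys/auxv.h>

    int
    main (void)
    {
      /* Base address of the kernel-provided vDSO, 0 if absent.  */
      unsigned long vdso = getauxval (AT_SYSINFO_EHDR);
      printf ("vDSO mapped at %#lx\n", vdso);
      return 0;
    }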
lib/libc/glibc/sysdeps/unix/sysv/linux/nios2/sysdep.h
@@ -0,0 +1,240 @@
+/* Assembler macros for Nios II.
+   Copyright (C) 2000-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#ifndef _LINUX_NIOS2_SYSDEP_H
+#define _LINUX_NIOS2_SYSDEP_H 1
+
+#include <sysdeps/unix/sysdep.h>
+#include <sysdeps/nios2/sysdep.h>
+#include <sysdeps/unix/sysv/linux/generic/sysdep.h>
+
+/* For RTLD_PRIVATE_ERRNO.  */
+#include <dl-sysdep.h>
+
+#include <tls.h>
+
+/* For Linux we can use the system call table in the header file
+        /usr/include/asm/unistd.h
+   of the kernel.  But these symbols do not follow the SYS_* syntax
+   so we have to redefine the `SYS_ify' macro here.  */
+#undef SYS_ify
+#define SYS_ify(syscall_name)   __NR_##syscall_name
+
+#ifdef __ASSEMBLER__
+
+#undef SYSCALL_ERROR_LABEL
+#define SYSCALL_ERROR_LABEL __local_syscall_error
+
+#undef PSEUDO
+#define PSEUDO(name, syscall_name, args) \
+  ENTRY (name)                           \
+    DO_CALL (syscall_name, args)         \
+    bne r7, zero, SYSCALL_ERROR_LABEL;   \
+
+#undef PSEUDO_END
+#define PSEUDO_END(name) \
+  SYSCALL_ERROR_HANDLER  \
+  END (name)
+
+#undef PSEUDO_NOERRNO
+#define PSEUDO_NOERRNO(name, syscall_name, args) \
+  ENTRY (name)                                   \
+    DO_CALL (syscall_name, args)
+
+#undef PSEUDO_END_NOERRNO
+#define PSEUDO_END_NOERRNO(name) \
+  END (name)
+
+#undef ret_NOERRNO
+#define ret_NOERRNO ret
+
+#undef DO_CALL
+#define DO_CALL(syscall_name, args) \
+    DOARGS_##args                   \
+    movi r2, SYS_ify(syscall_name);  \
+    trap;
+
+#if defined(__PIC__) || defined(PIC)
+
+# if RTLD_PRIVATE_ERRNO
+
+#  define SYSCALL_ERROR_HANDLER			\
+  SYSCALL_ERROR_LABEL:				\
+  nextpc r3;					\
+1:						\
+  movhi r8, %hiadj(rtld_errno - 1b);		\
+  addi r8, r8, %lo(rtld_errno - 1b);		\
+  add r3, r3, r8;				\
+  stw r2, 0(r3);				\
+  movi r2, -1;					\
+  ret;
+
+# else
+
+#  if IS_IN (libc)
+#   define SYSCALL_ERROR_ERRNO __libc_errno
+#  else
+#   define SYSCALL_ERROR_ERRNO errno
+#  endif
+#  define SYSCALL_ERROR_HANDLER			\
+  SYSCALL_ERROR_LABEL:				\
+  nextpc r3;					\
+1:						\
+  movhi r8, %hiadj(_gp_got - 1b);		\
+  addi r8, r8, %lo(_gp_got - 1b);		\
+  add r3, r3, r8;				\
+  ldw r3, %tls_ie(SYSCALL_ERROR_ERRNO)(r3);	\
+  add r3, r23, r3;				\
+  stw r2, 0(r3);				\
+  movi r2, -1;					\
+  ret;
+
+# endif
+
+#else
+
+/* We can use a single error handler in the static library.  */
+#define SYSCALL_ERROR_HANDLER			\
+  SYSCALL_ERROR_LABEL:				\
+  jmpi __syscall_error;
+
+#endif
+
+#define DOARGS_0 /* nothing */
+#define DOARGS_1 /* nothing */
+#define DOARGS_2 /* nothing */
+#define DOARGS_3 /* nothing */
+#define DOARGS_4 /* nothing */
+#define DOARGS_5 ldw r8, 0(sp);
+#define DOARGS_6 ldw r9, 4(sp); ldw r8, 0(sp);
+
+/* The function has to return the error code.  */
+#undef  PSEUDO_ERRVAL
+#define PSEUDO_ERRVAL(name, syscall_name, args) \
+  ENTRY (name)                                  \
+    DO_CALL (syscall_name, args)
+
+#undef  PSEUDO_END_ERRVAL
+#define PSEUDO_END_ERRVAL(name) \
+  END (name)
+
+#define ret_ERRVAL ret
+
+#else /* __ASSEMBLER__ */
+
+/* In order to get __set_errno() definition in INLINE_SYSCALL.  */
+#include <errno.h>
+
+#undef INTERNAL_SYSCALL_RAW
+#define INTERNAL_SYSCALL_RAW(name, nr, args...)                 \
+  ({ unsigned int _sys_result;                                  \
+     {                                                          \
+       /* Load argument values in temporary variables
+	  to perform side effects like function calls
+	  before the call-used registers are set.  */		\
+       LOAD_ARGS_##nr (args)					\
+       LOAD_REGS_##nr						\
+       register int _r2 asm ("r2") = (int)(name);               \
+       register int _err asm ("r7");                            \
+       asm volatile ("trap"                                     \
+                     : "+r" (_r2), "=r" (_err)                  \
+                     : ASM_ARGS_##nr				\
+                     : __SYSCALL_CLOBBERS);                     \
+       _sys_result = _err != 0 ? -_r2 : _r2;                    \
+     }                                                          \
+     (int) _sys_result; })
+
+#undef INTERNAL_SYSCALL
+#define INTERNAL_SYSCALL(name, nr, args...) \
+	INTERNAL_SYSCALL_RAW(SYS_ify(name), nr, args)
+
+#undef INTERNAL_SYSCALL_NCS
+#define INTERNAL_SYSCALL_NCS(number, nr, args...) \
+	INTERNAL_SYSCALL_RAW(number, nr, args)
+
+#define LOAD_ARGS_0()
+#define LOAD_REGS_0
+#define ASM_ARGS_0
+#define LOAD_ARGS_1(a1)				\
+  LOAD_ARGS_0 ()				\
+  int __arg1 = (int) (a1);
+#define LOAD_REGS_1				\
+  register int _r4 asm ("r4") = __arg1;		\
+  LOAD_REGS_0
+#define ASM_ARGS_1                  "r" (_r4)
+#define LOAD_ARGS_2(a1, a2)			\
+  LOAD_ARGS_1 (a1)				\
+  int __arg2 = (int) (a2);
+#define LOAD_REGS_2				\
+  register int _r5 asm ("r5") = __arg2;		\
+  LOAD_REGS_1
+#define ASM_ARGS_2      ASM_ARGS_1, "r" (_r5)
+#define LOAD_ARGS_3(a1, a2, a3)			\
+  LOAD_ARGS_2 (a1, a2)				\
+  int __arg3 = (int) (a3);
+#define LOAD_REGS_3				\
+  register int _r6 asm ("r6") = __arg3;		\
+  LOAD_REGS_2
+#define ASM_ARGS_3      ASM_ARGS_2, "r" (_r6)
+#define LOAD_ARGS_4(a1, a2, a3, a4)		\
+  LOAD_ARGS_3 (a1, a2, a3)			\
+  int __arg4 = (int) (a4);
+#define LOAD_REGS_4				\
+  register int _r7 asm ("r7") = __arg4;		\
+  LOAD_REGS_3
+#define ASM_ARGS_4      ASM_ARGS_3, "r" (_r7)
+#define LOAD_ARGS_5(a1, a2, a3, a4, a5)		\
+  LOAD_ARGS_4 (a1, a2, a3, a4)			\
+  int __arg5 = (int) (a5);
+#define LOAD_REGS_5				\
+  register int _r8 asm ("r8") = __arg5;		\
+  LOAD_REGS_4
+#define ASM_ARGS_5      ASM_ARGS_4, "r" (_r8)
+#define LOAD_ARGS_6(a1, a2, a3, a4, a5, a6)	\
+  LOAD_ARGS_5 (a1, a2, a3, a4, a5)		\
+  int __arg6 = (int) (a6);
+#define LOAD_REGS_6			    \
+  register int _r9 asm ("r9") = __arg6;     \
+  LOAD_REGS_5
+#define ASM_ARGS_6      ASM_ARGS_5, "r" (_r9)
+
+#define __SYSCALL_CLOBBERS "memory"
+
+#undef HAVE_INTERNAL_BRK_ADDR_SYMBOL
+#define HAVE_INTERNAL_BRK_ADDR_SYMBOL 1
+
+#endif /* __ASSEMBLER__ */
+
+/* Pointer mangling support.  */
+#if IS_IN (rtld)
+/* We cannot use the thread descriptor because in ld.so we use setjmp
+   earlier than the descriptor is initialized.  */
+#else
+# ifdef __ASSEMBLER__
+#  define PTR_MANGLE_GUARD(guard) ldw guard, POINTER_GUARD(r23)
+#  define PTR_MANGLE(dst, src, guard) xor dst, src, guard
+#  define PTR_DEMANGLE(dst, src, guard) PTR_MANGLE (dst, src, guard)
+# else
+#  define PTR_MANGLE(var) \
+  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
+#  define PTR_DEMANGLE(var)	PTR_MANGLE (var)
+# endif
+#endif
+
+
+#endif /* linux/nios2/sysdep.h */
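
The non-assembler PTR_MANGLE above is a plain XOR against a per-thread guard, so mangling and demangling are the same operation. A standalone round-trip sketch (the guard value here is a stand-in for THREAD_GET_POINTER_GUARD ()):

    #include <assert.h>
    #include <stdint.h>

    int
    main (void)
    {
      uintptr_t guard = 0x5ca1ab1e;                       /* stand-in guard */
      void *p = &guard;
      void *mangled = (void *) ((uintptr_t) p ^ guard);   /* PTR_MANGLE */
      assert ((void *) ((uintptr_t) mangled ^ guard) == p);  /* PTR_DEMANGLE */
      return 0;
    }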
lib/libc/glibc/sysdeps/unix/sysv/linux/powerpc/sysdep.h
@@ -0,0 +1,265 @@
+/* Syscall definitions, Linux PowerPC generic version.
+   Copyright (C) 2019-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _LINUX_POWERPC_SYSDEP_H
+#define _LINUX_POWERPC_SYSDEP_H 1
+
+#include <sysdeps/unix/sysv/linux/sysdep.h>
+#include <sysdeps/unix/powerpc/sysdep.h>
+#include <tls.h>
+/* Define __set_errno() for INLINE_SYSCALL macro below.  */
+#include <errno.h>
+
+/* For Linux we can use the system call table in the header file
+       /usr/include/asm/unistd.h
+   of the kernel.  But these symbols do not follow the SYS_* syntax
+   so we have to redefine the `SYS_ify' macro here.  */
+#undef SYS_ify
+#define SYS_ify(syscall_name)  __NR_##syscall_name
+
+/* Define a macro which expands inline into the wrapper code for a system
+   call. This use is for internal calls that do not need to handle errors
+   normally. It will never touch errno. This returns just what the kernel
+   gave back in the non-error (CR0.SO cleared) case, otherwise (CR0.SO set)
+   the negation of the return value in the kernel gets reverted.  */
+
+#define INTERNAL_VSYSCALL_CALL_TYPE(funcptr, type, nr, args...)         \
+  ({									\
+    register void *r0  __asm__ ("r0");					\
+    register long int r3  __asm__ ("r3");				\
+    register long int r4  __asm__ ("r4");				\
+    register long int r5  __asm__ ("r5");				\
+    register long int r6  __asm__ ("r6");				\
+    register long int r7  __asm__ ("r7");				\
+    register long int r8  __asm__ ("r8");				\
+    register type rval  __asm__ ("r3");				        \
+    LOADARGS_##nr (funcptr, args);					\
+    __asm__ __volatile__						\
+      ("mtctr %0\n\t"							\
+       "bctrl\n\t"							\
+       "mfcr  %0\n\t"							\
+       "0:"								\
+       : "+r" (r0), "+r" (r3), "+r" (r4), "+r" (r5),  "+r" (r6),        \
+         "+r" (r7), "+r" (r8)						\
+       : : "r9", "r10", "r11", "r12",					\
+           "cr0", "cr1", "cr5", "cr6", "cr7",				\
+           "xer", "lr", "ctr", "memory");				\
+    __asm__ __volatile__ ("" : "=r" (rval) : "r" (r3));		        \
+    (long int) r0 & (1 << 28) ? -rval : rval;				\
+  })
+
+#define INTERNAL_VSYSCALL_CALL(funcptr, nr, args...)			\
+  INTERNAL_VSYSCALL_CALL_TYPE(funcptr, long int, nr, args)
+
+#define DECLARE_REGS				\
+  register long int r0  __asm__ ("r0");		\
+  register long int r3  __asm__ ("r3");		\
+  register long int r4  __asm__ ("r4");		\
+  register long int r5  __asm__ ("r5");		\
+  register long int r6  __asm__ ("r6");		\
+  register long int r7  __asm__ ("r7");		\
+  register long int r8  __asm__ ("r8");
+
+#define SYSCALL_SCV(nr)				\
+  ({						\
+    __asm__ __volatile__			\
+      (".machine \"push\"\n\t"			\
+       ".machine \"power9\"\n\t"		\
+       "scv 0\n\t"				\
+       ".machine \"pop\"\n\t"			\
+       "0:"					\
+       : "=&r" (r0),				\
+	 "=&r" (r3), "=&r" (r4), "=&r" (r5),	\
+	 "=&r" (r6), "=&r" (r7), "=&r" (r8)	\
+       : ASM_INPUT_##nr			\
+       : "r9", "r10", "r11", "r12",		\
+	 "cr0", "cr1", "cr5", "cr6", "cr7",	\
+	 "xer", "lr", "ctr", "memory"); 	\
+    r3;					\
+  })
+
+#define SYSCALL_SC(nr)				\
+  ({						\
+    __asm__ __volatile__			\
+      ("sc\n\t"				\
+       "mfcr %0\n\t"				\
+       "0:"					\
+       : "=&r" (r0),				\
+	 "=&r" (r3), "=&r" (r4), "=&r" (r5),	\
+	 "=&r" (r6), "=&r" (r7), "=&r" (r8)	\
+       : ASM_INPUT_##nr			\
+       : "r9", "r10", "r11", "r12",		\
+	 "xer", "cr0", "ctr", "memory");	\
+    r0 & (1 << 28) ? -r3 : r3;			\
+  })
+
+/* This will only be non-empty for 64-bit systems, see below.  */
+#define TRY_SYSCALL_SCV(nr)
+
+#if defined(__PPC64__) || defined(__powerpc64__)
+# define SYSCALL_ARG_SIZE 8
+
+/* For the static case, unlike the dynamic loader, there is no compile-time way
+   to check if we are inside startup code.  So we need to check if the thread
+   pointer has already been set up before trying to access the TLS.  */
+# ifndef SHARED
+#  define CHECK_THREAD_POINTER (__thread_register != 0)
+# else
+#  define CHECK_THREAD_POINTER (1)
+# endif
+
+/* When inside the dynamic loader, the thread pointer may not have been
+   initialized yet, so don't check for scv support in that case.  */
+# if defined(USE_PPC_SCV) && !IS_IN(rtld)
+#  undef TRY_SYSCALL_SCV
+#  define TRY_SYSCALL_SCV(nr)						\
+  CHECK_THREAD_POINTER && THREAD_GET_HWCAP() & PPC_FEATURE2_SCV ?	\
+      SYSCALL_SCV(nr) :
+# endif
+
+#else
+# define SYSCALL_ARG_SIZE 4
+#endif
+
+#define INTERNAL_SYSCALL_NCS(name, nr, args...)	\
+  ({							\
+    DECLARE_REGS;					\
+    LOADARGS_##nr (name, ##args);			\
+    TRY_SYSCALL_SCV(nr)					\
+    SYSCALL_SC(nr);					\
+  })
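Note the trick in the body above: TRY_SYSCALL_SCV(nr) expands either to nothing (32-bit) or to a `cond ? SYSCALL_SCV(nr) :` prefix, so the final statement is a single conditional expression. An approximate expansion for one argument, assuming a 64-bit static build with USE_PPC_SCV, outside the loader:

    ({
      DECLARE_REGS;
      LOADARGS_1 (name, arg1);
      (__thread_register != 0                  /* CHECK_THREAD_POINTER */
       && THREAD_GET_HWCAP () & PPC_FEATURE2_SCV)
        ? SYSCALL_SCV (1)                      /* power9 "scv 0" entry */
        : SYSCALL_SC (1);                      /* classic "sc" entry */
    })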
+
+#undef INTERNAL_SYSCALL
+#define INTERNAL_SYSCALL(name, nr, args...)				\
+  INTERNAL_SYSCALL_NCS (__NR_##name, nr, args)
+
+#define LOADARGS_0(name, dummy) \
+	r0 = name
+#define LOADARGS_1(name, __arg1) \
+	long int _arg1 = (long int) (__arg1); \
+	LOADARGS_0(name, 0); \
+	extern void __illegally_sized_syscall_arg1 (void); \
+	if (__builtin_classify_type (__arg1) != 5 \
+	    && sizeof (__arg1) > SYSCALL_ARG_SIZE) \
+	  __illegally_sized_syscall_arg1 (); \
+	r3 = _arg1
+#define LOADARGS_2(name, __arg1, __arg2) \
+	long int _arg2 = (long int) (__arg2); \
+	LOADARGS_1(name, __arg1); \
+	extern void __illegally_sized_syscall_arg2 (void); \
+	if (__builtin_classify_type (__arg2) != 5 \
+	    && sizeof (__arg2) > SYSCALL_ARG_SIZE) \
+	  __illegally_sized_syscall_arg2 (); \
+	r4 = _arg2
+#define LOADARGS_3(name, __arg1, __arg2, __arg3) \
+	long int _arg3 = (long int) (__arg3); \
+	LOADARGS_2(name, __arg1, __arg2); \
+	extern void __illegally_sized_syscall_arg3 (void); \
+	if (__builtin_classify_type (__arg3) != 5 \
+	    && sizeof (__arg3) > SYSCALL_ARG_SIZE) \
+	  __illegally_sized_syscall_arg3 (); \
+	r5 = _arg3
+#define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
+	long int _arg4 = (long int) (__arg4); \
+	LOADARGS_3(name, __arg1, __arg2, __arg3); \
+	extern void __illegally_sized_syscall_arg4 (void); \
+	if (__builtin_classify_type (__arg4) != 5 \
+	    && sizeof (__arg4) > SYSCALL_ARG_SIZE) \
+	  __illegally_sized_syscall_arg4 (); \
+	r6 = _arg4
+#define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
+	long int _arg5 = (long int) (__arg5); \
+	LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
+	extern void __illegally_sized_syscall_arg5 (void); \
+	if (__builtin_classify_type (__arg5) != 5 \
+	    && sizeof (__arg5) > SYSCALL_ARG_SIZE) \
+	  __illegally_sized_syscall_arg5 (); \
+	r7 = _arg5
+#define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
+	long int _arg6 = (long int) (__arg6); \
+	LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
+	extern void __illegally_sized_syscall_arg6 (void); \
+	if (__builtin_classify_type (__arg6) != 5 \
+	    && sizeof (__arg6) > SYSCALL_ARG_SIZE) \
+	  __illegally_sized_syscall_arg6 (); \
+	r8 = _arg6
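The guard in each LOADARGS_N leans on GCC's __builtin_classify_type, where class 5 is pointer_type_class: pointers are exempt (they always fit a register), while any other argument wider than SYSCALL_ARG_SIZE calls the deliberately undefined __illegally_sized_syscall_argN, turning the mistake into a link-time error. A small stand-alone check of that classification assumption:

    #include <assert.h>

    int
    main (void)
    {
      int *p = 0;
      long long big = 0;
      /* GCC classifies any pointer as 5 (pointer_type_class), so the
         LOADARGS_* size check is skipped for pointers ...  */
      assert (__builtin_classify_type (p) == 5);
      /* ... but a 64-bit integer is not class 5, so on 32-bit powerpc
         (SYSCALL_ARG_SIZE == 4) passing one would fail to link.  */
      assert (__builtin_classify_type (big) != 5);
      return 0;
    }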
+
+#define ASM_INPUT_0 "0" (r0)
+#define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
+#define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
+#define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
+#define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
+#define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
+#define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
+
+
+/* Pointer mangling support.  */
+#if IS_IN (rtld)
+/* We cannot use the thread descriptor here, because ld.so uses setjmp
+   before the descriptor has been initialized.  */
+#else
+# ifdef __ASSEMBLER__
+#  if defined(__PPC64__) || defined(__powerpc64__)
+#   define LOAD  ld
+#   define TPREG r13
+#  else
+#   define LOAD  lwz
+#   define TPREG r2
+#  endif
+#  define PTR_MANGLE(reg, tmpreg) \
+	LOAD	tmpreg,POINTER_GUARD(TPREG); \
+	xor	reg,tmpreg,reg
+#  define PTR_MANGLE2(reg, tmpreg) \
+	xor	reg,tmpreg,reg
+#  define PTR_MANGLE3(destreg, reg, tmpreg) \
+	LOAD	tmpreg,POINTER_GUARD(TPREG); \
+	xor	destreg,tmpreg,reg
+#  define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
+#  define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
+#  define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
+# else
+#  define PTR_MANGLE(var) \
+  (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
+#  define PTR_DEMANGLE(var)	PTR_MANGLE (var)
+# endif
+#endif
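At the C level the mangling scheme is a plain XOR with the per-thread pointer guard, so demangling is the same operation applied twice; a minimal round-trip sketch, with a constant standing in for THREAD_GET_POINTER_GUARD ():

    #include <assert.h>
    #include <stdint.h>

    int
    main (void)
    {
      uintptr_t guard = 0x5eed5eed;   /* stand-in for the real TLS guard */
      void *fp = &guard;              /* any pointer worth protecting */

      void *mangled = (void *) ((uintptr_t) fp ^ guard);
      void *demangled = (void *) ((uintptr_t) mangled ^ guard);

      assert (demangled == fp);       /* XOR twice restores the pointer */
      return 0;
    }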
+
+/* vDSO version identification, followed by the list of system calls
+   which are supported as vsyscalls.  */
+#define VDSO_NAME  "LINUX_2.6.15"
+#define VDSO_HASH  123718565
+
+#if defined(__PPC64__) || defined(__powerpc64__)
+#define HAVE_CLOCK_GETRES64_VSYSCALL	"__kernel_clock_getres"
+#define HAVE_CLOCK_GETTIME64_VSYSCALL	"__kernel_clock_gettime"
+#else
+#define HAVE_CLOCK_GETRES_VSYSCALL	"__kernel_clock_getres"
+#define HAVE_CLOCK_GETTIME_VSYSCALL	"__kernel_clock_gettime"
+#endif
+#define HAVE_GETCPU_VSYSCALL		"__kernel_getcpu"
+#define HAVE_TIME_VSYSCALL		"__kernel_time"
+#define HAVE_GETTIMEOFDAY_VSYSCALL      "__kernel_gettimeofday"
+#define HAVE_GET_TBFREQ                 "__kernel_get_tbfreq"
+
+#if defined(__PPC64__) || defined(__powerpc64__)
+# define HAVE_SIGTRAMP_RT64		"__kernel_sigtramp_rt64"
+#else
+# define HAVE_SIGTRAMP_32		"__kernel_sigtramp32"
+# define HAVE_SIGTRAMP_RT32		"__kernel_sigtramp_rt32"
+#endif
+
+#endif /* _LINUX_POWERPC_SYSDEP_H  */
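The HAVE_*_VSYSCALL names above, together with the VDSO_NAME/VDSO_HASH version pair, are what glibc matches against the vDSO's versioned symbol table when wiring up these fallbacks. A stand-alone sketch of locating the vDSO image those symbols live in (resolution of the symbols themselves is omitted):

    #include <stdio.h>
    #include <sys/auxv.h>

    int
    main (void)
    {
      /* The kernel publishes the vDSO's load address in the auxiliary
         vector; glibc then looks up names such as
         "__kernel_clock_gettime" at version "LINUX_2.6.15" inside it.  */
      unsigned long int vdso = getauxval (AT_SYSINFO_EHDR);
      printf ("vDSO mapped at %#lx\n", vdso);
      return 0;
    }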
lib/libc/glibc/sysdeps/unix/sysv/linux/s390/sysdep.h
@@ -0,0 +1,113 @@
+/* Syscall definitions, Linux s390 version.
+   Copyright (C) 2019-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef __ASSEMBLER__
+
+#undef SYS_ify
+#define SYS_ify(syscall_name)	__NR_##syscall_name
+
+#undef INTERNAL_SYSCALL_DIRECT
+#define INTERNAL_SYSCALL_DIRECT(name, nr, args...)			      \
+  ({									      \
+    DECLARGS_##nr(args)							      \
+    register long int _ret __asm__("2");				      \
+    __asm__ __volatile__ (						      \
+			  "svc    %b1\n\t"				      \
+			  : "=d" (_ret)					      \
+			  : "i" (__NR_##name) ASMFMT_##nr		      \
+			  : "memory" );					      \
+    _ret; })
+
+#undef INTERNAL_SYSCALL_SVC0
+#define INTERNAL_SYSCALL_SVC0(name, nr, args...)			      \
+  ({									      \
+    DECLARGS_##nr(args)							      \
+    register unsigned long int _nr __asm__("1") =			      \
+      (unsigned long int)(__NR_##name);					      \
+    register long int _ret __asm__("2");				      \
+    __asm__ __volatile__ (						      \
+			  "svc    0\n\t"				      \
+			  : "=d" (_ret)					      \
+			  : "d" (_nr) ASMFMT_##nr			      \
+			  : "memory" );					      \
+    _ret; })
+
+#undef INTERNAL_SYSCALL_NCS
+#define INTERNAL_SYSCALL_NCS(no, nr, args...)				      \
+  ({									      \
+    DECLARGS_##nr(args)							      \
+    register unsigned long int _nr __asm__("1") = (unsigned long int)(no);    \
+    register long int _ret __asm__("2");				      \
+    __asm__ __volatile__ (						      \
+			  "svc    0\n\t"				      \
+			  : "=d" (_ret)					      \
+			  : "d" (_nr) ASMFMT_##nr			      \
+			  : "memory" );					      \
+    _ret; })
+
+#undef INTERNAL_SYSCALL
+#define INTERNAL_SYSCALL(name, nr, args...)				      \
+  (((__NR_##name) < 256)						      \
+   ? INTERNAL_SYSCALL_DIRECT(name, nr, args)				      \
+   : INTERNAL_SYSCALL_SVC0(name, nr, args))
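The split at 256 exists because the `svc` instruction encodes its syscall number in an 8-bit immediate; anything larger must go through `svc 0` with the number preloaded into r1. Since __NR_##name is an integer constant expression, the compiler folds the choice away. A hedged illustration (the wrapper is not part of this header):

    /* __NR_read is 3 on s390, below 256, so this constant-folds to
       INTERNAL_SYSCALL_DIRECT and the compiler emits "svc 3"; a number
       of 256 or more would take the INTERNAL_SYSCALL_SVC0 path.  */
    static long int
    illustrative_read (int fd, void *buf, unsigned long int len)
    {
      return INTERNAL_SYSCALL (read, 3, fd, buf, len);
    }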
+
+#define DECLARGS_0()
+#define DECLARGS_1(arg1) \
+  register unsigned long int gpr2 __asm__ ("2") = (unsigned long int)(arg1);
+#define DECLARGS_2(arg1, arg2) \
+  DECLARGS_1(arg1) \
+  register unsigned long int gpr3 __asm__ ("3") = (unsigned long int)(arg2);
+#define DECLARGS_3(arg1, arg2, arg3) \
+  DECLARGS_2(arg1, arg2) \
+  register unsigned long int gpr4 __asm__ ("4") = (unsigned long int)(arg3);
+#define DECLARGS_4(arg1, arg2, arg3, arg4) \
+  DECLARGS_3(arg1, arg2, arg3) \
+  register unsigned long int gpr5 __asm__ ("5") = (unsigned long int)(arg4);
+#define DECLARGS_5(arg1, arg2, arg3, arg4, arg5) \
+  DECLARGS_4(arg1, arg2, arg3, arg4) \
+  register unsigned long int gpr6 __asm__ ("6") = (unsigned long int)(arg5);
+#define DECLARGS_6(arg1, arg2, arg3, arg4, arg5, arg6) \
+  DECLARGS_5(arg1, arg2, arg3, arg4, arg5) \
+  register unsigned long int gpr7 __asm__ ("7") = (unsigned long int)(arg6);
+
+#define ASMFMT_0
+#define ASMFMT_1 , "0" (gpr2)
+#define ASMFMT_2 , "0" (gpr2), "d" (gpr3)
+#define ASMFMT_3 , "0" (gpr2), "d" (gpr3), "d" (gpr4)
+#define ASMFMT_4 , "0" (gpr2), "d" (gpr3), "d" (gpr4), "d" (gpr5)
+#define ASMFMT_5 , "0" (gpr2), "d" (gpr3), "d" (gpr4), "d" (gpr5), "d" (gpr6)
+#define ASMFMT_6 , "0" (gpr2), "d" (gpr3), "d" (gpr4), "d" (gpr5), "d" (gpr6), "d" (gpr7)
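On s390 the first argument and the return value share r2, which is why ASMFMT_1 ties gpr2 to output 0 with the "0" matching constraint instead of a plain "d". A tiny stand-alone demonstration of the same constraint mechanism (generic GCC inline asm, not s390-specific):

    #include <stdio.h>

    int
    main (void)
    {
      long int in = 41, out;
      /* "0" forces the input into whatever register the compiler chose
         for output 0 -- the same tying used for gpr2 and _ret above.  */
      __asm__ ("" : "=r" (out) : "0" (in));
      printf ("%ld\n", out + 1);   /* prints 42 */
      return 0;
    }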
+
+#define SINGLE_THREAD_BY_GLOBAL		1
+
+#define VDSO_NAME  "LINUX_2.6.29"
+#define VDSO_HASH  123718585
+
+/* List of system calls which are supported as vsyscalls.  */
+#ifdef __s390x__
+#define HAVE_CLOCK_GETRES64_VSYSCALL	"__kernel_clock_getres"
+#define HAVE_CLOCK_GETTIME64_VSYSCALL	"__kernel_clock_gettime"
+#else
+#define HAVE_CLOCK_GETRES_VSYSCALL	"__kernel_clock_getres"
+#define HAVE_CLOCK_GETTIME_VSYSCALL	"__kernel_clock_gettime"
+#endif
+#define HAVE_GETTIMEOFDAY_VSYSCALL	"__kernel_gettimeofday"
+#define HAVE_GETCPU_VSYSCALL		"__kernel_getcpu"
+
+#endif /* __ASSEMBLER__ */