Add support for riscv64 (WIP)

The syscall macros were taken from glibc master.

Signed-off-by: David Abdurachmanov <david.abdurachmanov@gmail.com>
David Abdurachmanov 2018-04-30 08:08:56 +02:00
parent 26423f95b1
commit 620fd4e8fd
2 changed files with 220 additions and 2 deletions
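
For context, the convention the new macros below encode on RISC-V Linux is: the syscall number goes in register a7, up to seven arguments go in a0..a6, and the result (or a negated errno) comes back in a0. The following is a minimal, self-contained sketch of that convention, not part of the commit; the function and file names are illustrative, it assumes a riscv64 Linux target, and "ecall" is simply the current mnemonic for the older "scall" spelling used in the patch.

/* Illustrative sketch only (assumption: riscv64 Linux target; not code
   from this commit).  Mirrors what internal_syscall3 below does.  */
#include <sys/syscall.h>   /* __NR_write */
#include <unistd.h>        /* STDOUT_FILENO */

/* Three-argument syscall: number in a7, args in a0..a2, result in a0.  */
static long riscv_syscall3 (long number, long arg0, long arg1, long arg2)
{
  register long a7 __asm__ ("a7") = number;
  register long a0 __asm__ ("a0") = arg0;
  register long a1 __asm__ ("a1") = arg1;
  register long a2 __asm__ ("a2") = arg2;

  __asm__ volatile ("ecall"              /* same instruction as "scall" */
                    : "+r" (a0)
                    : "r" (a7), "r" (a1), "r" (a2)
                    : "memory");
  return a0;   /* >= 0 on success, -errno on failure */
}

int main (void)
{
  riscv_syscall3 (__NR_write, STDOUT_FILENO, (long) "hi\n", 3);
  return 0;
}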

redhat-lsb-4.1-add-riscv64.patch (new file)

@@ -0,0 +1,208 @@
diff --git a/redhat_lsb_trigger.c b/redhat_lsb_trigger.c
index 1f548f1..6ad4372 100644
--- a/redhat_lsb_trigger.c
+++ b/redhat_lsb_trigger.c
@@ -520,6 +520,194 @@ register void *__thread_self __asm ("g7");
# define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \
INTERNAL_SYSCALL_RAW (number, err, nr, args)
+#elif defined __riscv
+#undef SYS_ify
+#define SYS_ify(syscall_name) (__NR_##syscall_name)
+
+# undef INLINE_SYSCALL
+# define INLINE_SYSCALL(name, nr, args...) \
+ ({ INTERNAL_SYSCALL_DECL (err); \
+ long int __sys_result = INTERNAL_SYSCALL (name, err, nr, args); \
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (__sys_result, ))) \
+ { \
+ __set_errno (INTERNAL_SYSCALL_ERRNO (__sys_result, )); \
+ __sys_result = (unsigned long) -1; \
+ } \
+ __sys_result; })
+
+# define INTERNAL_SYSCALL_DECL(err) do { } while (0)
+
+# define INTERNAL_SYSCALL_ERROR_P(val, err) \
+ ((unsigned long int) (val) > -4096UL)
+
+# define INTERNAL_SYSCALL_ERRNO(val, err) (-val)
+
+# define INTERNAL_SYSCALL(name, err, nr, args...) \
+ internal_syscall##nr (SYS_ify (name), err, args)
+
+# define INTERNAL_SYSCALL_NCS(number, err, nr, args...) \
+ internal_syscall##nr (number, err, args)
+
+# define internal_syscall0(number, err, dummy...) \
+({ \
+ long int _sys_result; \
+ \
+ { \
+ register long int __a7 asm ("a7") = number; \
+ register long int __a0 asm ("a0"); \
+ __asm__ volatile ( \
+ "scall\n\t" \
+ : "=r" (__a0) \
+ : "r" (__a7) \
+ : __SYSCALL_CLOBBERS); \
+ _sys_result = __a0; \
+ } \
+ _sys_result; \
+})
+
+# define internal_syscall1(number, err, arg0) \
+({ \
+ long int _sys_result; \
+ \
+ { \
+ register long int __a7 asm ("a7") = number; \
+ register long int __a0 asm ("a0") = (long int) (arg0); \
+ __asm__ volatile ( \
+ "scall\n\t" \
+ : "+r" (__a0) \
+ : "r" (__a7) \
+ : __SYSCALL_CLOBBERS); \
+ _sys_result = __a0; \
+ } \
+ _sys_result; \
+})
+
+# define internal_syscall2(number, err, arg0, arg1) \
+({ \
+ long int _sys_result; \
+ \
+ { \
+ register long int __a7 asm ("a7") = number; \
+ register long int __a0 asm ("a0") = (long int) (arg0); \
+ register long int __a1 asm ("a1") = (long int) (arg1); \
+ __asm__ volatile ( \
+ "scall\n\t" \
+ : "+r" (__a0) \
+ : "r" (__a7), "r" (__a1) \
+ : __SYSCALL_CLOBBERS); \
+ _sys_result = __a0; \
+ } \
+ _sys_result; \
+})
+
+# define internal_syscall3(number, err, arg0, arg1, arg2) \
+({ \
+ long int _sys_result; \
+ \
+ { \
+ register long int __a7 asm ("a7") = number; \
+ register long int __a0 asm ("a0") = (long int) (arg0); \
+ register long int __a1 asm ("a1") = (long int) (arg1); \
+ register long int __a2 asm ("a2") = (long int) (arg2); \
+ __asm__ volatile ( \
+ "scall\n\t" \
+ : "+r" (__a0) \
+ : "r" (__a7), "r" (__a1), "r" (__a2) \
+ : __SYSCALL_CLOBBERS); \
+ _sys_result = __a0; \
+ } \
+ _sys_result; \
+})
+
+# define internal_syscall4(number, err, arg0, arg1, arg2, arg3) \
+({ \
+ long int _sys_result; \
+ \
+ { \
+ register long int __a7 asm ("a7") = number; \
+ register long int __a0 asm ("a0") = (long int) (arg0); \
+ register long int __a1 asm ("a1") = (long int) (arg1); \
+ register long int __a2 asm ("a2") = (long int) (arg2); \
+ register long int __a3 asm ("a3") = (long int) (arg3); \
+ __asm__ volatile ( \
+ "scall\n\t" \
+ : "+r" (__a0) \
+ : "r" (__a7), "r" (__a1), "r" (__a2), "r" (__a3) \
+ : __SYSCALL_CLOBBERS); \
+ _sys_result = __a0; \
+ } \
+ _sys_result; \
+})
+
+# define internal_syscall5(number, err, arg0, arg1, arg2, arg3, arg4) \
+({ \
+ long int _sys_result; \
+ \
+ { \
+ register long int __a7 asm ("a7") = number; \
+ register long int __a0 asm ("a0") = (long int) (arg0); \
+ register long int __a1 asm ("a1") = (long int) (arg1); \
+ register long int __a2 asm ("a2") = (long int) (arg2); \
+ register long int __a3 asm ("a3") = (long int) (arg3); \
+ register long int __a4 asm ("a4") = (long int) (arg4); \
+ __asm__ volatile ( \
+ "scall\n\t" \
+ : "+r" (__a0) \
+ : "r" (__a7), "r"(__a1), "r"(__a2), "r"(__a3), "r" (__a4) \
+ : __SYSCALL_CLOBBERS); \
+ _sys_result = __a0; \
+ } \
+ _sys_result; \
+})
+
+# define internal_syscall6(number, err, arg0, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ long int _sys_result; \
+ \
+ { \
+ register long int __a7 asm ("a7") = number; \
+ register long int __a0 asm ("a0") = (long int) (arg0); \
+ register long int __a1 asm ("a1") = (long int) (arg1); \
+ register long int __a2 asm ("a2") = (long int) (arg2); \
+ register long int __a3 asm ("a3") = (long int) (arg3); \
+ register long int __a4 asm ("a4") = (long int) (arg4); \
+ register long int __a5 asm ("a5") = (long int) (arg5); \
+ __asm__ volatile ( \
+ "scall\n\t" \
+ : "+r" (__a0) \
+ : "r" (__a7), "r" (__a1), "r" (__a2), "r" (__a3), \
+ "r" (__a4), "r" (__a5) \
+ : __SYSCALL_CLOBBERS); \
+ _sys_result = __a0; \
+ } \
+ _sys_result; \
+})
+
+# define internal_syscall7(number, err, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ long int _sys_result; \
+ \
+ { \
+ register long int __a7 asm ("a7") = number; \
+ register long int __a0 asm ("a0") = (long int) (arg0); \
+ register long int __a1 asm ("a1") = (long int) (arg1); \
+ register long int __a2 asm ("a2") = (long int) (arg2); \
+ register long int __a3 asm ("a3") = (long int) (arg3); \
+ register long int __a4 asm ("a4") = (long int) (arg4); \
+ register long int __a5 asm ("a5") = (long int) (arg5); \
+ register long int __a6 asm ("a6") = (long int) (arg6); \
+ __asm__ volatile ( \
+ "scall\n\t" \
+ : "+r" (__a0) \
+ : "r" (__a7), "r" (__a1), "r" (__a2), "r" (__a3), \
+ "r" (__a4), "r" (__a5), "r" (__a6) \
+ : __SYSCALL_CLOBBERS); \
+ _sys_result = __a0; \
+ } \
+ _sys_result; \
+})
+
+# define __SYSCALL_CLOBBERS "memory"
#endif
#ifdef __i386__
@@ -682,7 +870,7 @@ int __libc_start_main (int argc, char **argv, char **ev,
void *auxvec, void (*rtld_fini) (void),
struct startup_info *stinfo,
char **stack_on_entry)
-#elif defined __arm__ || defined __aarch64__
+#elif defined __arm__ || defined __aarch64__ || defined __riscv
int ___libc_start_main (int (*main) (int argc, char **argv),
int argc, char **argv,
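
The macros above hand back the raw kernel return value; INTERNAL_SYSCALL_ERROR_P and INTERNAL_SYSCALL_ERRNO then treat any value above (unsigned long) -4096 as a negated errno. A standalone sketch of that decoding step (illustrative only, not code from this patch; the helper name is made up):

/* Sketch of the error convention relied on above (assumption: the kernel
   returns -errno in a0 on failure).  */
#include <errno.h>

static long decode_syscall_result (long val)
{
  if ((unsigned long) val > -4096UL)   /* same test as INTERNAL_SYSCALL_ERROR_P */
    {
      errno = (int) -val;              /* same as INTERNAL_SYSCALL_ERRNO */
      return -1;
    }
  return val;
}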

redhat-lsb.spec

@@ -51,6 +51,11 @@
%global lsbldso ld-lsb-aarch64.so
%endif
+%ifarch riscv64
+%global ldso ld-linux-riscv64-lp64d.so.1
+%global lsbldso ld-lsb-riscv64.so
+%endif
%global upstreamlsbrelver 2.0
%global lsbrelver 4.1
%global srcrelease 1
@@ -70,6 +75,7 @@ Patch1: redhat-lsb-lsb_start_daemon-fix.patch
Patch2: redhat-lsb-trigger.patch
Patch3: redhat-lsb-arm.patch
Patch4: redhat-lsb-aarch64.patch
+Patch5: redhat-lsb-4.1-add-riscv64.patch
License: GPLv2
BuildRequires: glibc-static
BuildRequires: perl-generators
@@ -106,8 +112,11 @@ BuildRequires: gcc
%ifarch aarch64
%global archname aarch64
%endif
+%ifarch riscv64
+%global archname riscv64
+%endif
-ExclusiveArch: %{ix86} ia64 x86_64 ppc ppc64 s390 s390x %{arm} aarch64 ppc64le
+ExclusiveArch: %{ix86} ia64 x86_64 ppc ppc64 s390 s390x %{arm} aarch64 ppc64le riscv64
Requires: redhat-lsb-core%{?_isa} = %{version}-%{release}
Requires: redhat-lsb-cxx%{?_isa} = %{version}-%{release}
@@ -350,7 +359,7 @@ Requires: libjpeg-turbo%{?_isa}
%ifarch %{ix86} ppc s390 arm
Requires: libpng12.so.0
%endif
-%ifarch x86_64 ppc64 s390x aarch64 ppc64le
+%ifarch x86_64 ppc64 s390x aarch64 ppc64le riscv64
Requires: libpng12.so.0()(64bit)
%endif
Requires: libpng%{?_isa}
@@ -479,6 +488,7 @@ to be on LSB conforming system.
%patch2 -p0 -b .triggerfix
%patch3 -p1 -b .arm
%patch4 -p1 -b .aarch64
+%patch5 -p1 -b .riscv64
%build
cd lsb-release-%{upstreamlsbrelver}