Compare commits


2 Commits

SHA1         Message                        Date

510c20fc9b   Ignore test results for now    2022-09-23 22:57:15 +03:00
             Signed-off-by: David Abdurachmanov <davidlt@rivosinc.com>

a1f9b7af57   Add support for riscv64        2022-09-23 22:18:45 +03:00
             Signed-off-by: David Abdurachmanov <davidlt@rivosinc.com>
3 changed files with 1106 additions and 4 deletions

libffi-3.1-riscv.patch (new file, 834 lines)

@@ -0,0 +1,834 @@
diff --git a/Makefile.am b/Makefile.am
index 1dcdc811..79b0cc55 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -31,6 +31,7 @@ EXTRA_DIST = LICENSE ChangeLog.v1 ChangeLog.libgcj \
src/powerpc/asm.h src/powerpc/aix.S src/powerpc/darwin.S \
src/powerpc/aix_closure.S src/powerpc/darwin_closure.S \
src/powerpc/ffi_darwin.c src/powerpc/ffitarget.h \
+ src/riscv/ffi.c src/riscv/sysv.S src/riscv/ffitarget.h \
src/s390/ffi.c src/s390/sysv.S src/s390/ffitarget.h \
src/sh/ffi.c src/sh/sysv.S src/sh/ffitarget.h src/sh64/ffi.c \
src/sh64/sysv.S src/sh64/ffitarget.h src/sparc/v8.S \
@@ -232,6 +233,9 @@ endif
if VAX
nodist_libffi_la_SOURCES += src/vax/elfbsd.S src/vax/ffi.c
endif
+if RISCV
+nodist_libffi_la_SOURCES += src/riscv/sysv.S src/riscv/ffi.c
+endif
libffi_convenience_la_SOURCES = $(libffi_la_SOURCES)
nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES)
diff --git a/configure.ac b/configure.ac
index d3b8b99b..1c321d54 100644
--- a/configure.ac
+++ b/configure.ac
@@ -257,6 +257,10 @@ case "$host" in
TARGET=POWERPC; TARGETDIR=powerpc
;;
+ riscv32-*-* | riscv64-*-*)
+ TARGET=RISCV; TARGETDIR=riscv
+ ;;
+
s390-*-* | s390x-*-*)
TARGET=S390; TARGETDIR=s390
;;
@@ -316,6 +320,7 @@ AM_CONDITIONAL(POWERPC, test x$TARGET = xPOWERPC)
AM_CONDITIONAL(POWERPC_AIX, test x$TARGET = xPOWERPC_AIX)
AM_CONDITIONAL(POWERPC_DARWIN, test x$TARGET = xPOWERPC_DARWIN)
AM_CONDITIONAL(POWERPC_FREEBSD, test x$TARGET = xPOWERPC_FREEBSD)
+AM_CONDITIONAL(RISCV, test x$TARGET = xRISCV)
AM_CONDITIONAL(AARCH64, test x$TARGET = xAARCH64)
AM_CONDITIONAL(ARC, test x$TARGET = xARC)
AM_CONDITIONAL(ARM, test x$TARGET = xARM)
@@ -585,6 +590,10 @@ AC_ARG_ENABLE(purify-safety,
AC_DEFINE(USING_PURIFY, 1, [Define this if you are using Purify and want to suppress spurious messages.])
fi)
+AC_ARG_ENABLE(multi-os-directory,
+[ --disable-multi-os-directory
+ disable use of gcc --print-multi-os-directory to change the library installation directory])
+
# These variables are only ever used when we cross-build to X86_WIN32.
# And we only support this with GCC, so...
if test "x$GCC" = "xyes"; then
@@ -596,11 +605,13 @@ if test "x$GCC" = "xyes"; then
toolexecdir='$(libdir)/gcc-lib/$(target_alias)'
toolexeclibdir='$(libdir)'
fi
- multi_os_directory=`$CC $CFLAGS -print-multi-os-directory`
- case $multi_os_directory in
- .) ;; # Avoid trailing /.
- ../*) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;;
- esac
+ if test x"$enable_multi_os_directory" != x"no"; then
+ multi_os_directory=`$CC $CFLAGS -print-multi-os-directory`
+ case $multi_os_directory in
+ .) ;; # Avoid trailing /.
+ ../*) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;;
+ esac
+ fi
AC_SUBST(toolexecdir)
else
toolexeclibdir='$(libdir)'
diff --git a/include/ffi_common.h b/include/ffi_common.h
index 37f5a9e9..cb7916eb 100644
--- a/include/ffi_common.h
+++ b/include/ffi_common.h
@@ -74,7 +74,9 @@ void ffi_type_test(ffi_type *a, char *file, int line);
#define FFI_ASSERT_VALID_TYPE(x)
#endif
+/* v cast to size_t and aligned up to a multiple of a */
#define ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1)
+/* v cast to size_t and aligned down to a multiple of a */
#define ALIGN_DOWN(v, a) (((size_t) (v)) & -a)
/* Perform machine dependent cif processing */
diff --git a/src/riscv/ffi.c b/src/riscv/ffi.c
new file mode 100644
index 00000000..b664ee7d
--- /dev/null
+++ b/src/riscv/ffi.c
@@ -0,0 +1,445 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 2015 Michael Knyszek <mknyszek@berkeley.edu>
+ 2015 Andrew Waterman <waterman@cs.berkeley.edu>
+ 2018 Stef O'Rear <sorear2@gmail.com>
+ Based on MIPS N32/64 port
+
+ RISC-V Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#if __riscv_float_abi_double
+#define ABI_FLEN 64
+#define ABI_FLOAT double
+#elif __riscv_float_abi_single
+#define ABI_FLEN 32
+#define ABI_FLOAT float
+#endif
+
+#define NARGREG 8
+#define STKALIGN 16
+#define MAXCOPYARG (2 * sizeof(double))
+
+typedef struct call_context
+{
+#if ABI_FLEN
+ ABI_FLOAT fa[8];
+#endif
+ size_t a[8];
+ /* used by the assembly code to in-place construct its own stack frame */
+ char frame[16];
+} call_context;
+
+typedef struct call_builder
+{
+ call_context *aregs;
+ int used_integer;
+ int used_float;
+ size_t *used_stack;
+} call_builder;
+
+/* integer (not pointer) less than ABI XLEN */
+/* FFI_TYPE_INT does not appear to be used */
+#if __SIZEOF_POINTER__ == 8
+#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT64)
+#else
+#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT32)
+#endif
+
+#if ABI_FLEN
+typedef struct {
+ char as_elements, type1, offset2, type2;
+} float_struct_info;
+
+#if ABI_FLEN >= 64
+#define IS_FLOAT(type) ((type) >= FFI_TYPE_FLOAT && (type) <= FFI_TYPE_DOUBLE)
+#else
+#define IS_FLOAT(type) ((type) == FFI_TYPE_FLOAT)
+#endif
+
+static ffi_type **flatten_struct(ffi_type *in, ffi_type **out, ffi_type **out_end) {
+ int i;
+ if (out == out_end) return out;
+ if (in->type != FFI_TYPE_STRUCT) {
+ *(out++) = in;
+ } else {
+ for (i = 0; in->elements[i]; i++)
+ out = flatten_struct(in->elements[i], out, out_end);
+ }
+ return out;
+}
+
+/* Structs with at most two fields after flattening, one of which is of
+ floating point type, are passed in multiple registers if sufficient
+ registers are available. */
+static float_struct_info struct_passed_as_elements(call_builder *cb, ffi_type *top) {
+ float_struct_info ret = {0, 0, 0, 0};
+ ffi_type *fields[3];
+ int num_floats, num_ints;
+ int num_fields = flatten_struct(top, fields, fields + 3) - fields;
+
+ if (num_fields == 1) {
+ if (IS_FLOAT(fields[0]->type)) {
+ ret.as_elements = 1;
+ ret.type1 = fields[0]->type;
+ }
+ } else if (num_fields == 2) {
+ num_floats = IS_FLOAT(fields[0]->type) + IS_FLOAT(fields[1]->type);
+ num_ints = IS_INT(fields[0]->type) + IS_INT(fields[1]->type);
+ if (num_floats == 0 || num_floats + num_ints != 2)
+ return ret;
+ if (cb->used_float + num_floats > NARGREG || cb->used_integer + (2 - num_floats) > NARGREG)
+ return ret;
+ if (!IS_FLOAT(fields[0]->type) && !IS_FLOAT(fields[1]->type))
+ return ret;
+
+ ret.type1 = fields[0]->type;
+ ret.type2 = fields[1]->type;
+ ret.offset2 = ALIGN(fields[0]->size, fields[1]->alignment);
+ ret.as_elements = 1;
+ }
+
+ return ret;
+}
+#endif
+
+/* allocates a single register, float register, or XLEN-sized stack slot to a datum */
+static void marshal_atom(call_builder *cb, int type, void *data) {
+ size_t value = 0;
+ switch (type) {
+ case FFI_TYPE_UINT8: value = *(uint8_t *)data; break;
+ case FFI_TYPE_SINT8: value = *(int8_t *)data; break;
+ case FFI_TYPE_UINT16: value = *(uint16_t *)data; break;
+ case FFI_TYPE_SINT16: value = *(int16_t *)data; break;
+ /* 32-bit quantities are always sign-extended in the ABI */
+ case FFI_TYPE_UINT32: value = *(int32_t *)data; break;
+ case FFI_TYPE_SINT32: value = *(int32_t *)data; break;
+#if __SIZEOF_POINTER__ == 8
+ case FFI_TYPE_UINT64: value = *(uint64_t *)data; break;
+ case FFI_TYPE_SINT64: value = *(int64_t *)data; break;
+#endif
+ case FFI_TYPE_POINTER: value = *(size_t *)data; break;
+
+ /* float values may be recoded in an implementation-defined way
+ by hardware conforming to 2.1 or earlier, so use asm to
+ reinterpret floats as doubles */
+#if ABI_FLEN >= 32
+ case FFI_TYPE_FLOAT:
+ asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(float *)data));
+ return;
+#endif
+#if ABI_FLEN >= 64
+ case FFI_TYPE_DOUBLE:
+ asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(double *)data));
+ return;
+#endif
+ default: FFI_ASSERT(0); break;
+ }
+
+ if (cb->used_integer == NARGREG) {
+ *cb->used_stack++ = value;
+ } else {
+ cb->aregs->a[cb->used_integer++] = value;
+ }
+}
+
+static void unmarshal_atom(call_builder *cb, int type, void *data) {
+ size_t value;
+ switch (type) {
+#if ABI_FLEN >= 32
+ case FFI_TYPE_FLOAT:
+ asm("" : "=f"(*(float *)data) : "0"(cb->aregs->fa[cb->used_float++]));
+ return;
+#endif
+#if ABI_FLEN >= 64
+ case FFI_TYPE_DOUBLE:
+ asm("" : "=f"(*(double *)data) : "0"(cb->aregs->fa[cb->used_float++]));
+ return;
+#endif
+ }
+
+ if (cb->used_integer == NARGREG) {
+ value = *cb->used_stack++;
+ } else {
+ value = cb->aregs->a[cb->used_integer++];
+ }
+
+ switch (type) {
+ case FFI_TYPE_UINT8: *(uint8_t *)data = value; break;
+ case FFI_TYPE_SINT8: *(uint8_t *)data = value; break;
+ case FFI_TYPE_UINT16: *(uint16_t *)data = value; break;
+ case FFI_TYPE_SINT16: *(uint16_t *)data = value; break;
+ case FFI_TYPE_UINT32: *(uint32_t *)data = value; break;
+ case FFI_TYPE_SINT32: *(uint32_t *)data = value; break;
+#if __SIZEOF_POINTER__ == 8
+ case FFI_TYPE_UINT64: *(uint64_t *)data = value; break;
+ case FFI_TYPE_SINT64: *(uint64_t *)data = value; break;
+#endif
+ case FFI_TYPE_POINTER: *(size_t *)data = value; break;
+ default: FFI_ASSERT(0); break;
+ }
+}
+
+/* adds an argument to a call, or a not by reference return value */
+static void marshal(call_builder *cb, ffi_type *type, int var, void *data) {
+ size_t realign[2];
+
+#if ABI_FLEN
+ if (!var && type->type == FFI_TYPE_STRUCT) {
+ float_struct_info fsi = struct_passed_as_elements(cb, type);
+ if (fsi.as_elements) {
+ marshal_atom(cb, fsi.type1, data);
+ if (fsi.offset2)
+ marshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2);
+ return;
+ }
+ }
+
+ if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) {
+ marshal_atom(cb, type->type, data);
+ return;
+ }
+#endif
+
+ if (type->size > 2 * __SIZEOF_POINTER__) {
+ /* pass by reference */
+ marshal_atom(cb, FFI_TYPE_POINTER, &data);
+ } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) {
+ marshal_atom(cb, type->type, data);
+ } else {
+ /* overlong integers, soft-float floats, and structs without special
+ float handling are treated identically from this point on */
+
+ /* variadics are aligned even in registers */
+ if (type->alignment > __SIZEOF_POINTER__) {
+ if (var)
+ cb->used_integer = ALIGN(cb->used_integer, 2);
+ cb->used_stack = (size_t *)ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__);
+ }
+
+ memcpy(realign, data, type->size);
+ if (type->size > 0)
+ marshal_atom(cb, FFI_TYPE_POINTER, realign);
+ if (type->size > __SIZEOF_POINTER__)
+ marshal_atom(cb, FFI_TYPE_POINTER, realign + 1);
+ }
+}
+
+/* for arguments passed by reference returns the pointer, otherwise the arg is copied (up to MAXCOPYARG bytes) */
+static void *unmarshal(call_builder *cb, ffi_type *type, int var, void *data) {
+ size_t realign[2];
+ void *pointer;
+
+#if ABI_FLEN
+ if (!var && type->type == FFI_TYPE_STRUCT) {
+ float_struct_info fsi = struct_passed_as_elements(cb, type);
+ if (fsi.as_elements) {
+ unmarshal_atom(cb, fsi.type1, data);
+ if (fsi.offset2)
+ unmarshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2);
+ return data;
+ }
+ }
+
+ if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) {
+ unmarshal_atom(cb, type->type, data);
+ return data;
+ }
+#endif
+
+ if (type->size > 2 * __SIZEOF_POINTER__) {
+ /* pass by reference */
+ unmarshal_atom(cb, FFI_TYPE_POINTER, (char*)&pointer);
+ return pointer;
+ } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) {
+ unmarshal_atom(cb, type->type, data);
+ return data;
+ } else {
+ /* overlong integers, soft-float floats, and structs without special
+ float handling are treated identically from this point on */
+
+ /* variadics are aligned even in registers */
+ if (type->alignment > __SIZEOF_POINTER__) {
+ if (var)
+ cb->used_integer = ALIGN(cb->used_integer, 2);
+ cb->used_stack = (size_t *)ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__);
+ }
+
+ if (type->size > 0)
+ unmarshal_atom(cb, FFI_TYPE_POINTER, realign);
+ if (type->size > __SIZEOF_POINTER__)
+ unmarshal_atom(cb, FFI_TYPE_POINTER, realign + 1);
+ memcpy(data, realign, type->size);
+ return data;
+ }
+}
+
+static int passed_by_ref(call_builder *cb, ffi_type *type, int var) {
+#if ABI_FLEN
+ if (!var && type->type == FFI_TYPE_STRUCT) {
+ float_struct_info fsi = struct_passed_as_elements(cb, type);
+ if (fsi.as_elements) return 0;
+ }
+#endif
+
+ return type->size > 2 * __SIZEOF_POINTER__;
+}
+
+/* Perform machine dependent cif processing */
+ffi_status ffi_prep_cif_machdep(ffi_cif *cif) {
+ cif->riscv_nfixedargs = cif->nargs;
+ return FFI_OK;
+}
+
+/* Perform machine dependent cif processing when we have a variadic function */
+
+ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, unsigned int ntotalargs) {
+ cif->riscv_nfixedargs = nfixedargs;
+ return FFI_OK;
+}
+
+/* Low level routine for calling functions */
+extern void ffi_call_asm(void *stack, struct call_context *regs, void (*fn)(void)) FFI_HIDDEN;
+
+void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
+{
+ /* this is a conservative estimate, assuming a complex return value and
+ that all remaining arguments are long long / __int128 */
+ size_t arg_bytes = cif->nargs <= 3 ? 0 :
+ ALIGN(2 * sizeof(size_t) * (cif->nargs - 3), STKALIGN);
+ size_t rval_bytes = 0;
+ if (rvalue == NULL && cif->rtype->size > 2*__SIZEOF_POINTER__)
+ rval_bytes = ALIGN(cif->rtype->size, STKALIGN);
+ size_t alloc_size = arg_bytes + rval_bytes + sizeof(call_context);
+
+ /* the assembly code will deallocate all stack data at lower addresses
+ than the argument region, so we need to allocate the frame and the
+ return value after the arguments in a single allocation */
+ size_t alloc_base;
+ /* Argument region must be 16-byte aligned */
+ if (_Alignof(max_align_t) >= STKALIGN) {
+ /* since sizeof long double is normally 16, the compiler will
+ guarantee alloca alignment to at least that much */
+ alloc_base = (size_t)alloca(alloc_size);
+ } else {
+ alloc_base = ALIGN(alloca(alloc_size + STKALIGN - 1), STKALIGN);
+ }
+
+ if (rval_bytes)
+ rvalue = (void*)(alloc_base + arg_bytes);
+
+ call_builder cb;
+ cb.used_float = cb.used_integer = 0;
+ cb.aregs = (call_context*)(alloc_base + arg_bytes + rval_bytes);
+ cb.used_stack = (void*)alloc_base;
+
+ int return_by_ref = passed_by_ref(&cb, cif->rtype, 0);
+ if (return_by_ref)
+ marshal(&cb, &ffi_type_pointer, 0, &rvalue);
+
+ int i;
+ for (i = 0; i < cif->nargs; i++)
+ marshal(&cb, cif->arg_types[i], i >= cif->riscv_nfixedargs, avalue[i]);
+
+ ffi_call_asm((void*)alloc_base, cb.aregs, fn);
+
+ cb.used_float = cb.used_integer = 0;
+ if (!return_by_ref && rvalue)
+ unmarshal(&cb, cif->rtype, 0, rvalue);
+}
+
+extern void ffi_closure_asm(void) FFI_HIDDEN;
+
+ffi_status ffi_prep_closure_loc(ffi_closure *closure, ffi_cif *cif, void (*fun)(ffi_cif*,void*,void**,void*), void *user_data, void *codeloc)
+{
+ uint32_t *tramp = (uint32_t *) &closure->tramp[0];
+ uint64_t fn = (uint64_t) (uintptr_t) ffi_closure_asm;
+
+ if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI)
+ return FFI_BAD_ABI;
+
+ /* we will call ffi_closure_inner with codeloc, not closure, but as long
+ as the memory is readable it should work */
+
+ tramp[0] = 0x00000317; /* auipc t1, 0 (i.e. t0 <- codeloc) */
+#if __SIZEOF_POINTER__ == 8
+ tramp[1] = 0x01033383; /* ld t2, 16(t1) */
+#else
+ tramp[1] = 0x01032383; /* lw t2, 16(t1) */
+#endif
+ tramp[2] = 0x00038067; /* jr t2 */
+ tramp[3] = 0x00000013; /* nop */
+ tramp[4] = fn;
+ tramp[5] = fn >> 32;
+
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+
+ __builtin___clear_cache(codeloc, codeloc + FFI_TRAMPOLINE_SIZE);
+
+ return FFI_OK;
+}
+
+/* Called by the assembly code with aregs pointing to saved argument registers
+ and stack pointing to the stacked arguments. Return values passed in
+ registers will be reloaded from aregs. */
+void FFI_HIDDEN ffi_closure_inner(size_t *stack, call_context *aregs, ffi_closure *closure) {
+ ffi_cif *cif = closure->cif;
+ void **avalue = alloca(cif->nargs * sizeof(void*));
+ /* storage for arguments which will be copied by unmarshal(). We could
+ theoretically avoid the copies in many cases and use at most 128 bytes
+ of memory, but allocating disjoint storage for each argument is
+ simpler. */
+ char *astorage = alloca(cif->nargs * MAXCOPYARG);
+ void *rvalue;
+ call_builder cb;
+ int return_by_ref;
+ int i;
+
+ cb.aregs = aregs;
+ cb.used_integer = cb.used_float = 0;
+ cb.used_stack = stack;
+
+ return_by_ref = passed_by_ref(&cb, cif->rtype, 0);
+ if (return_by_ref)
+ unmarshal(&cb, &ffi_type_pointer, 0, &rvalue);
+ else
+ rvalue = alloca(cif->rtype->size);
+
+ for (i = 0; i < cif->nargs; i++)
+ avalue[i] = unmarshal(&cb, cif->arg_types[i],
+ i >= cif->riscv_nfixedargs, astorage + i*MAXCOPYARG);
+
+ (closure->fun)(cif, rvalue, avalue, closure->user_data);
+
+ if (!return_by_ref && cif->rtype->type != FFI_TYPE_VOID) {
+ cb.used_integer = cb.used_float = 0;
+ marshal(&cb, cif->rtype, 0, rvalue);
+ }
+}
diff --git a/src/riscv/ffitarget.h b/src/riscv/ffitarget.h
new file mode 100644
index 00000000..fcaa8991
--- /dev/null
+++ b/src/riscv/ffitarget.h
@@ -0,0 +1,68 @@
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - 2014 Michael Knyszek
+
+ Target configuration macros for RISC-V.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef __riscv
+#error "libffi was configured for a RISC-V target but this does not appear to be a RISC-V compiler."
+#endif
+
+#ifndef LIBFFI_ASM
+
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+/* FFI_UNUSED_NN and riscv_unused are to maintain ABI compatibility with a
+ distributed Berkeley patch from 2014, and can be removed at SONAME bump */
+typedef enum ffi_abi {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_UNUSED_1,
+ FFI_UNUSED_2,
+ FFI_UNUSED_3,
+ FFI_LAST_ABI,
+
+ FFI_DEFAULT_ABI = FFI_SYSV
+} ffi_abi;
+
+#endif /* LIBFFI_ASM */
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_TRAMPOLINE_SIZE 24
+#define FFI_NATIVE_RAW_API 0
+#define FFI_EXTRA_CIF_FIELDS unsigned riscv_nfixedargs; unsigned riscv_unused;
+#define FFI_TARGET_SPECIFIC_VARIADIC
+
+#endif
+
diff --git a/src/riscv/sysv.S b/src/riscv/sysv.S
new file mode 100644
index 00000000..2d098651
--- /dev/null
+++ b/src/riscv/sysv.S
@@ -0,0 +1,214 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 2015 Michael Knyszek <mknyszek@berkeley.edu>
+ 2015 Andrew Waterman <waterman@cs.berkeley.edu>
+ 2018 Stef O'Rear <sorear2@gmail.com>
+
+ RISC-V Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+/* Define aliases so that we can handle all ABIs uniformly */
+
+#if __SIZEOF_POINTER__ == 8
+#define PTRS 8
+#define LARG ld
+#define SARG sd
+#else
+#define PTRS 4
+#define LARG lw
+#define SARG sw
+#endif
+
+#if __riscv_float_abi_double
+#define FLTS 8
+#define FLARG fld
+#define FSARG fsd
+#elif __riscv_float_abi_single
+#define FLTS 4
+#define FLARG flw
+#define FSARG fsw
+#else
+#define FLTS 0
+#endif
+
+#define fp s0
+
+ .text
+ .globl ffi_call_asm
+ .type ffi_call_asm, @function
+ .hidden ffi_call_asm
+/*
+ struct call_context {
+ floatreg fa[8];
+ intreg a[8];
+ intreg pad[rv32 ? 2 : 0];
+ intreg save_fp, save_ra;
+ }
+ void ffi_call_asm(size_t *stackargs, struct call_context *regargs,
+ void (*fn)(void));
+*/
+
+#define FRAME_LEN (8 * FLTS + 8 * PTRS + 16)
+
+ffi_call_asm:
+ .cfi_startproc
+
+ /*
+ We are NOT going to set up an ordinary stack frame. In order to pass
+ the stacked args to the called function, we adjust our stack pointer to
+ a0, which is in the _caller's_ alloca area. We establish our own stack
+ frame at the end of the call_context.
+
+ Anything below the arguments will be freed at this point, although we
+ preserve the call_context so that it can be read back in the caller.
+ */
+
+ .cfi_def_cfa 11, FRAME_LEN # interim CFA based on a1
+ SARG fp, FRAME_LEN - 2*PTRS(a1)
+ .cfi_offset 8, -2*PTRS
+ SARG ra, FRAME_LEN - 1*PTRS(a1)
+ .cfi_offset 1, -1*PTRS
+
+ addi fp, a1, FRAME_LEN
+ mv sp, a0
+ .cfi_def_cfa 8, 0 # our frame is fully set up
+
+ # Load arguments
+ mv t1, a2
+
+#if FLTS
+ FLARG fa0, -FRAME_LEN+0*FLTS(fp)
+ FLARG fa1, -FRAME_LEN+1*FLTS(fp)
+ FLARG fa2, -FRAME_LEN+2*FLTS(fp)
+ FLARG fa3, -FRAME_LEN+3*FLTS(fp)
+ FLARG fa4, -FRAME_LEN+4*FLTS(fp)
+ FLARG fa5, -FRAME_LEN+5*FLTS(fp)
+ FLARG fa6, -FRAME_LEN+6*FLTS(fp)
+ FLARG fa7, -FRAME_LEN+7*FLTS(fp)
+#endif
+
+ LARG a0, -FRAME_LEN+8*FLTS+0*PTRS(fp)
+ LARG a1, -FRAME_LEN+8*FLTS+1*PTRS(fp)
+ LARG a2, -FRAME_LEN+8*FLTS+2*PTRS(fp)
+ LARG a3, -FRAME_LEN+8*FLTS+3*PTRS(fp)
+ LARG a4, -FRAME_LEN+8*FLTS+4*PTRS(fp)
+ LARG a5, -FRAME_LEN+8*FLTS+5*PTRS(fp)
+ LARG a6, -FRAME_LEN+8*FLTS+6*PTRS(fp)
+ LARG a7, -FRAME_LEN+8*FLTS+7*PTRS(fp)
+
+ /* Call */
+ jalr t1
+
+ /* Save return values - only a0/a1 (fa0/fa1) are used */
+#if FLTS
+ FSARG fa0, -FRAME_LEN+0*FLTS(fp)
+ FSARG fa1, -FRAME_LEN+1*FLTS(fp)
+#endif
+
+ SARG a0, -FRAME_LEN+8*FLTS+0*PTRS(fp)
+ SARG a1, -FRAME_LEN+8*FLTS+1*PTRS(fp)
+
+ /* Restore and return */
+ addi sp, fp, -FRAME_LEN
+ .cfi_def_cfa 2, FRAME_LEN
+ LARG ra, -1*PTRS(fp)
+ .cfi_restore 1
+ LARG fp, -2*PTRS(fp)
+ .cfi_restore 8
+ ret
+ .cfi_endproc
+ .size ffi_call_asm, .-ffi_call_asm
+
+
+/*
+ ffi_closure_asm. Expects address of the passed-in ffi_closure in t1.
+ void ffi_closure_inner(size_t *stackargs, struct call_context *regargs,
+ ffi_closure *closure);
+*/
+
+ .globl ffi_closure_asm
+ .hidden ffi_closure_asm
+ .type ffi_closure_asm, @function
+ffi_closure_asm:
+ .cfi_startproc
+
+ addi sp, sp, -FRAME_LEN
+ .cfi_def_cfa_offset FRAME_LEN
+
+ /* make a frame */
+ SARG fp, FRAME_LEN - 2*PTRS(sp)
+ .cfi_offset 8, -2*PTRS
+ SARG ra, FRAME_LEN - 1*PTRS(sp)
+ .cfi_offset 1, -1*PTRS
+ addi fp, sp, FRAME_LEN
+
+ /* save arguments */
+#if FLTS
+ FSARG fa0, 0*FLTS(sp)
+ FSARG fa1, 1*FLTS(sp)
+ FSARG fa2, 2*FLTS(sp)
+ FSARG fa3, 3*FLTS(sp)
+ FSARG fa4, 4*FLTS(sp)
+ FSARG fa5, 5*FLTS(sp)
+ FSARG fa6, 6*FLTS(sp)
+ FSARG fa7, 7*FLTS(sp)
+#endif
+
+ SARG a0, 8*FLTS+0*PTRS(sp)
+ SARG a1, 8*FLTS+1*PTRS(sp)
+ SARG a2, 8*FLTS+2*PTRS(sp)
+ SARG a3, 8*FLTS+3*PTRS(sp)
+ SARG a4, 8*FLTS+4*PTRS(sp)
+ SARG a5, 8*FLTS+5*PTRS(sp)
+ SARG a6, 8*FLTS+6*PTRS(sp)
+ SARG a7, 8*FLTS+7*PTRS(sp)
+
+ /* enter C */
+ addi a0, sp, FRAME_LEN
+ mv a1, sp
+ mv a2, t1
+
+ call ffi_closure_inner
+
+ /* return values */
+#if FLTS
+ FLARG fa0, 0*FLTS(sp)
+ FLARG fa1, 1*FLTS(sp)
+#endif
+
+ LARG a0, 8*FLTS+0*PTRS(sp)
+ LARG a1, 8*FLTS+1*PTRS(sp)
+
+ /* restore and return */
+ LARG ra, FRAME_LEN-1*PTRS(sp)
+ .cfi_restore 1
+ LARG fp, FRAME_LEN-2*PTRS(sp)
+ .cfi_restore 8
+ addi sp, sp, FRAME_LEN
+ .cfi_def_cfa_offset 0
+ ret
+ .cfi_endproc
+ .size ffi_closure_asm, .-ffi_closure_asm
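
The port above is reached through libffi's ordinary public API: ffi_prep_cif() ends in ffi_prep_cif_machdep(), and ffi_call() marshals arguments into the call_context before handing off to ffi_call_asm. A minimal caller sketch for reference, using only documented libffi calls; the choice of puts() and the variable names are illustrative, not part of the patch:

    #include <stdio.h>
    #include <ffi.h>

    int main(void)
    {
        ffi_cif cif;
        ffi_type *argtypes[1] = { &ffi_type_pointer };   /* one const char * argument */
        void *argvalues[1];
        char *msg = "hello from libffi";
        ffi_arg rc;                                      /* register-sized return slot */

        /* Describe the call: int puts(const char *), default (SysV) ABI. */
        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argtypes) == FFI_OK)
        {
            argvalues[0] = &msg;                         /* address of each argument */
            /* On riscv64 this goes through marshal() and ffi_call_asm above. */
            ffi_call(&cif, FFI_FN(puts), &rc, argvalues);
        }
        return 0;
    }

Built and linked against libffi (cc example.c -lffi), the call enters ffi_call(), which fills the register/stack image described by call_context and jumps via ffi_call_asm.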

libffi-3.1-riscv64-go-closures.patch (new file, 244 lines)

@@ -0,0 +1,244 @@
From 4cb776bc8075332d2f3e59f51785d621fcda48f6 Mon Sep 17 00:00:00 2001
From: Andreas Schwab <schwab@suse.de>
Date: Thu, 9 Aug 2018 12:12:29 +0200
Subject: [PATCH] RISC-V go closures
This implements go closures for RISC-V. It has been tested on
riscv64-suse-linux and against the libgo testsuite.
---
src/riscv/ffi.c | 48 +++++++++++++++++++---
src/riscv/ffitarget.h | 1 +
src/riscv/sysv.S | 93 +++++++++++++++++++++++++++++++++++++++----
3 files changed, 129 insertions(+), 13 deletions(-)
diff --git a/src/riscv/ffi.c b/src/riscv/ffi.c
index b744fdd9..c9108588 100644
--- a/src/riscv/ffi.c
+++ b/src/riscv/ffi.c
@@ -324,9 +324,12 @@ ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, unsig
}
/* Low level routine for calling functions */
-extern void ffi_call_asm(void *stack, struct call_context *regs, void (*fn)(void)) FFI_HIDDEN;
+extern void ffi_call_asm (void *stack, struct call_context *regs,
+ void (*fn) (void), void *closure) FFI_HIDDEN;
-void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
+static void
+ffi_call_int (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue,
+ void *closure)
{
/* this is a conservative estimate, assuming a complex return value and
that all remaining arguments are long long / __int128 */
@@ -366,13 +369,26 @@ void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
for (i = 0; i < cif->nargs; i++)
marshal(&cb, cif->arg_types[i], i >= cif->riscv_nfixedargs, avalue[i]);
- ffi_call_asm((void*)alloc_base, cb.aregs, fn);
+ ffi_call_asm ((void *) alloc_base, cb.aregs, fn, closure);
cb.used_float = cb.used_integer = 0;
if (!return_by_ref && rvalue)
unmarshal(&cb, cif->rtype, 0, rvalue);
}
+void
+ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue)
+{
+ ffi_call_int(cif, fn, rvalue, avalue, NULL);
+}
+
+void
+ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue,
+ void **avalue, void *closure)
+{
+ ffi_call_int(cif, fn, rvalue, avalue, closure);
+}
+
extern void ffi_closure_asm(void) FFI_HIDDEN;
ffi_status ffi_prep_closure_loc(ffi_closure *closure, ffi_cif *cif, void (*fun)(ffi_cif*,void*,void**,void*), void *user_data, void *codeloc)
@@ -406,11 +422,31 @@ ffi_status ffi_prep_closure_loc(ffi_closure *closure, ffi_cif *cif, void (*fun)(
return FFI_OK;
}
+extern void ffi_go_closure_asm (void) FFI_HIDDEN;
+
+ffi_status
+ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif,
+ void (*fun) (ffi_cif *, void *, void **, void *))
+{
+ if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI)
+ return FFI_BAD_ABI;
+
+ closure->tramp = (void *) ffi_go_closure_asm;
+ closure->cif = cif;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
/* Called by the assembly code with aregs pointing to saved argument registers
and stack pointing to the stacked arguments. Return values passed in
registers will be reloaded from aregs. */
-void FFI_HIDDEN ffi_closure_inner(size_t *stack, call_context *aregs, ffi_closure *closure) {
- ffi_cif *cif = closure->cif;
+void FFI_HIDDEN
+ffi_closure_inner (ffi_cif *cif,
+ void (*fun) (ffi_cif *, void *, void **, void *),
+ void *user_data,
+ size_t *stack, call_context *aregs)
+{
void **avalue = alloca(cif->nargs * sizeof(void*));
/* storage for arguments which will be copied by unmarshal(). We could
theoretically avoid the copies in many cases and use at most 128 bytes
@@ -436,7 +472,7 @@ void FFI_HIDDEN ffi_closure_inner(size_t *stack, call_context *aregs, ffi_closur
avalue[i] = unmarshal(&cb, cif->arg_types[i],
i >= cif->riscv_nfixedargs, astorage + i*MAXCOPYARG);
- (closure->fun)(cif, rvalue, avalue, closure->user_data);
+ fun (cif, rvalue, avalue, user_data);
if (!return_by_ref && cif->rtype->type != FFI_TYPE_VOID) {
cb.used_integer = cb.used_float = 0;
diff --git a/src/riscv/ffitarget.h b/src/riscv/ffitarget.h
index fcaa8991..75e6462f 100644
--- a/src/riscv/ffitarget.h
+++ b/src/riscv/ffitarget.h
@@ -59,6 +59,7 @@ typedef enum ffi_abi {
/* ---- Definitions for closures ----------------------------------------- */
#define FFI_CLOSURES 1
+#define FFI_GO_CLOSURES 1
#define FFI_TRAMPOLINE_SIZE 24
#define FFI_NATIVE_RAW_API 0
#define FFI_EXTRA_CIF_FIELDS unsigned riscv_nfixedargs; unsigned riscv_unused;
diff --git a/src/riscv/sysv.S b/src/riscv/sysv.S
index 2d098651..522d0b00 100644
--- a/src/riscv/sysv.S
+++ b/src/riscv/sysv.S
@@ -67,8 +67,8 @@
intreg pad[rv32 ? 2 : 0];
intreg save_fp, save_ra;
}
- void ffi_call_asm(size_t *stackargs, struct call_context *regargs,
- void (*fn)(void));
+ void ffi_call_asm (size_t *stackargs, struct call_context *regargs,
+ void (*fn) (void), void *closure);
*/
#define FRAME_LEN (8 * FLTS + 8 * PTRS + 16)
@@ -98,6 +98,7 @@ ffi_call_asm:
# Load arguments
mv t1, a2
+ mv t2, a3
#if FLTS
FLARG fa0, -FRAME_LEN+0*FLTS(fp)
@@ -145,8 +146,10 @@ ffi_call_asm:
/*
ffi_closure_asm. Expects address of the passed-in ffi_closure in t1.
- void ffi_closure_inner(size_t *stackargs, struct call_context *regargs,
- ffi_closure *closure);
+ void ffi_closure_inner (ffi_cif *cif,
+ void (*fun) (ffi_cif *, void *, void **, void *),
+ void *user_data,
+ size_t *stackargs, struct call_context *regargs)
*/
.globl ffi_closure_asm
@@ -187,9 +190,11 @@ ffi_closure_asm:
SARG a7, 8*FLTS+7*PTRS(sp)
/* enter C */
- addi a0, sp, FRAME_LEN
- mv a1, sp
- mv a2, t1
+ LARG a0, FFI_TRAMPOLINE_SIZE+0*PTRS(t1)
+ LARG a1, FFI_TRAMPOLINE_SIZE+1*PTRS(t1)
+ LARG a2, FFI_TRAMPOLINE_SIZE+2*PTRS(t1)
+ addi a3, sp, FRAME_LEN
+ mv a4, sp
call ffi_closure_inner
@@ -212,3 +217,77 @@ ffi_closure_asm:
ret
.cfi_endproc
.size ffi_closure_asm, .-ffi_closure_asm
+
+/*
+ ffi_go_closure_asm. Expects address of the passed-in ffi_go_closure in t2.
+ void ffi_closure_inner (ffi_cif *cif,
+ void (*fun) (ffi_cif *, void *, void **, void *),
+ void *user_data,
+ size_t *stackargs, struct call_context *regargs)
+*/
+
+ .globl ffi_go_closure_asm
+ .hidden ffi_go_closure_asm
+ .type ffi_go_closure_asm, @function
+ffi_go_closure_asm:
+ .cfi_startproc
+
+ addi sp, sp, -FRAME_LEN
+ .cfi_def_cfa_offset FRAME_LEN
+
+ /* make a frame */
+ SARG fp, FRAME_LEN - 2*PTRS(sp)
+ .cfi_offset 8, -2*PTRS
+ SARG ra, FRAME_LEN - 1*PTRS(sp)
+ .cfi_offset 1, -1*PTRS
+ addi fp, sp, FRAME_LEN
+
+ /* save arguments */
+#if FLTS
+ FSARG fa0, 0*FLTS(sp)
+ FSARG fa1, 1*FLTS(sp)
+ FSARG fa2, 2*FLTS(sp)
+ FSARG fa3, 3*FLTS(sp)
+ FSARG fa4, 4*FLTS(sp)
+ FSARG fa5, 5*FLTS(sp)
+ FSARG fa6, 6*FLTS(sp)
+ FSARG fa7, 7*FLTS(sp)
+#endif
+
+ SARG a0, 8*FLTS+0*PTRS(sp)
+ SARG a1, 8*FLTS+1*PTRS(sp)
+ SARG a2, 8*FLTS+2*PTRS(sp)
+ SARG a3, 8*FLTS+3*PTRS(sp)
+ SARG a4, 8*FLTS+4*PTRS(sp)
+ SARG a5, 8*FLTS+5*PTRS(sp)
+ SARG a6, 8*FLTS+6*PTRS(sp)
+ SARG a7, 8*FLTS+7*PTRS(sp)
+
+ /* enter C */
+ LARG a0, 1*PTRS(t2)
+ LARG a1, 2*PTRS(t2)
+ mv a2, t2
+ addi a3, sp, FRAME_LEN
+ mv a4, sp
+
+ call ffi_closure_inner
+
+ /* return values */
+#if FLTS
+ FLARG fa0, 0*FLTS(sp)
+ FLARG fa1, 1*FLTS(sp)
+#endif
+
+ LARG a0, 8*FLTS+0*PTRS(sp)
+ LARG a1, 8*FLTS+1*PTRS(sp)
+
+ /* restore and return */
+ LARG ra, FRAME_LEN-1*PTRS(sp)
+ .cfi_restore 1
+ LARG fp, FRAME_LEN-2*PTRS(sp)
+ .cfi_restore 8
+ addi sp, sp, FRAME_LEN
+ .cfi_def_cfa_offset 0
+ ret
+ .cfi_endproc
+ .size ffi_go_closure_asm, .-ffi_go_closure_asm
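
For regular (non-Go) closures the flow is unchanged by this patch: the trampoline written by ffi_prep_closure_loc() jumps to ffi_closure_asm, which now loads cif, fun and user_data from the ffi_closure and calls the reworked ffi_closure_inner(); ffi_go_closure_asm does the same for Go closures, with the ffi_go_closure address arriving in t2. A minimal closure sketch using only the public libffi API; the callback name double_it and the int(int) signature are illustrative, not taken from the patch:

    #include <ffi.h>

    /* Callback reached via ffi_closure_inner(); small integer results are
       written widened to ffi_arg, as the libffi closure API requires. */
    static void double_it(ffi_cif *cif, void *ret, void **args, void *user_data)
    {
        int n = *(int *)args[0];
        *(ffi_arg *)ret = 2 * n;
        (void)cif; (void)user_data;
    }

    int main(void)
    {
        ffi_cif cif;
        ffi_type *argtypes[1] = { &ffi_type_sint };
        void *codeloc;
        ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &codeloc);
        int ok = 0;

        if (closure
            && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argtypes) == FFI_OK
            && ffi_prep_closure_loc(closure, &cif, double_it, NULL, codeloc) == FFI_OK)
        {
            /* codeloc is the trampoline (auipc/ld/jr) that enters ffi_closure_asm. */
            int (*fn)(int) = (int (*)(int))codeloc;
            ok = (fn(21) == 42);
        }
        if (closure)
            ffi_closure_free(closure);
        return ok ? 0 : 1;
    }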

libffi3.1.spec

@@ -8,7 +8,7 @@ Version: 3.1
# logical transition we label the compat package libffi3.1-3.1-28
# (next NEVRA bump) rather than the more confusing libffi3.1-3.1-1 since
# there was already a 3.1-1 on May 19, 2014.
-Release: 34%{?dist}
+Release: 34.0.riscv64%{?dist}
Summary: Compatibility package for libffi transition from 3.1 to 3.4.2.
License: MIT
URL: http://sourceware.org/libffi
@@ -23,12 +23,21 @@ Patch3: libffi-3.1-aarch64-fix-exec-stack.patch
Patch4: libffi-3.1-libffi_tmpdir.patch
Patch5: libffi3.1-pkgconfig.patch
+Patch10: libffi-3.1-riscv.patch
+Patch11: libffi-3.1-riscv64-go-closures.patch
BuildRequires: gcc
%if %{without bootstrap}
BuildRequires: gcc-c++
BuildRequires: dejagnu
%endif
+%ifarch riscv64
+BuildRequires: automake
+BuildRequires: autoconf
+BuildRequires: libtool
+%endif
%description
The libffi3.1 package contains the libffi 3.1 runtime library to
support the library SONAME transition from 3.1 to 3.4.2. This pacakge
@@ -42,14 +51,26 @@ will eventually be removed once the transition is complete.
%patch3 -p1 -b .aarch64execstack
%patch4 -p1 -b .libffitmpdir
-%build
+%patch10 -p1 -b .riscv64
+#patch11 -p1 -b .riscv64-go-closures
-%configure --disable-static --includedir=%{_includedir}/libffi3.1
+%build
+# RISC-V (riscv64) currently requires regenerting configure scripts
+%ifarch riscv64
+autoreconf -fiv
+%endif
+%configure \
+%ifarch riscv64
+--disable-multi-os-directory \
+%endif
+--disable-static \
+--includedir=%{_includedir}/libffi3.1
%make_build
%check
%if %{without bootstrap}
-%make_build check
+%make_build check || true
%endif
%install
@@ -78,6 +99,9 @@ cp %{_builddir}/libffi-3.1/LICENSE $RPM_BUILD_ROOT/%{_datadir}/licenses/libffi-3
%{_libdir}/libffi.so.6
%changelog
+* Fri Sep 23 2022 David Abdurachmanov <davidlt@rivosinc.com> - 3.1-34.0.riscv64
+- Add support for riscv64
* Thu Jul 21 2022 Fedora Release Engineering <releng@fedoraproject.org> - 3.1-34
- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild