From 4cc1aa669a02efccaae105a73da65ac8af2a367c Mon Sep 17 00:00:00 2001 From: Orion Poplawski Date: Wed, 15 Mar 2017 11:58:50 -0600 Subject: [PATCH 1/9] Drop %defattr() --- openblas.spec | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/openblas.spec b/openblas.spec index de58d32..cbbcdd9 100644 --- a/openblas.spec +++ b/openblas.spec @@ -578,56 +578,45 @@ rm -rf %{buildroot}%{_libdir}/cmake rm -rf %{buildroot} %files -# DO NOT REMOVE %defattr SECTIONS! -%defattr(-,root,root,-) %doc serial/Changelog.txt serial/GotoBLAS* serial/LICENSE %{_libdir}/lib%{name}-*.so %{_libdir}/lib%{name}.so.* %files openmp -%defattr(-,root,root,-) %{_libdir}/lib%{name}o-*.so %{_libdir}/lib%{name}o.so.* %files threads -%defattr(-,root,root,-) %{_libdir}/lib%{name}p-*.so %{_libdir}/lib%{name}p.so.* %if %build64 %files serial64 -%defattr(-,root,root,-) %{_libdir}/lib%{name}64-*.so %{_libdir}/lib%{name}64.so.* %files openmp64 -%defattr(-,root,root,-) %{_libdir}/lib%{name}o64-*.so %{_libdir}/lib%{name}o64.so.* %files threads64 -%defattr(-,root,root,-) %{_libdir}/lib%{name}p64-*.so %{_libdir}/lib%{name}p64.so.* %files serial64_ -%defattr(-,root,root,-) %{_libdir}/lib%{name}64_-*.so %{_libdir}/lib%{name}64_.so.* %files openmp64_ -%defattr(-,root,root,-) %{_libdir}/lib%{name}o64_-*.so %{_libdir}/lib%{name}o64_.so.* %files threads64_ -%defattr(-,root,root,-) %{_libdir}/lib%{name}p64_-*.so %{_libdir}/lib%{name}p64_.so.* %endif %files devel -%defattr(-,root,root,-) %{_includedir}/%{name}/ %{_libdir}/lib%{name}.so %{_libdir}/lib%{name}o.so @@ -642,11 +631,9 @@ rm -rf %{buildroot} %endif %files Rblas -%defattr(-,root,root,-) %{_libdir}/R/lib/libRblas.so %files static -%defattr(-,root,root,-) %{_libdir}/lib%{name}.a %{_libdir}/lib%{name}o.a %{_libdir}/lib%{name}p.a From c98c022805fc47dc660434eef5f406a550bae506 Mon Sep 17 00:00:00 2001 From: Orion Poplawski Date: Wed, 15 Mar 2017 11:57:50 -0600 Subject: [PATCH 2/9] Define %penblas_arches for dependent packages to use --- openblas.spec | 28 ++++++++++++++++++++++++++-- openblas_arches | 1 + 2 files changed, 27 insertions(+), 2 deletions(-) create mode 100644 openblas_arches diff --git a/openblas.spec b/openblas.spec index cbbcdd9..17b6bdf 100644 --- a/openblas.spec +++ b/openblas.spec @@ -15,12 +15,14 @@ Name: openblas Version: 0.2.19 -Release: 7%{?dist} +Release: 8%{?dist} Summary: An optimized BLAS library based on GotoBLAS2 Group: Development/Libraries License: BSD URL: https://github.com/xianyi/OpenBLAS/ Source0: https://github.com/xianyi/OpenBLAS/archive/v%{version}.tar.gz +# This contains the list of arches that openblas works on +Source1: openblas_arches # Use system lapack Patch0: openblas-0.2.15-system_lapack.patch # Drop extra p from threaded library name @@ -91,7 +93,8 @@ BuildRequires: lapack64-static # Upstream supports the package only on these architectures. # Runtime processor detection is not available on other archs. 
-ExclusiveArch: x86_64 %{ix86} armv7hl %{power64} aarch64 +%global openblas_arches %(cat %SOURCE1) +ExclusiveArch: %{openblas_arches} %global base_description \ OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD \ @@ -204,6 +207,7 @@ Requires: %{name}-openmp64_%{?_isa} = %{version}-%{release} Requires: %{name}-threads64_%{?_isa} = %{version}-%{release} Requires: %{name}-serial64_%{?_isa} = %{version}-%{release} %endif +Requires: %{name}-srpm-macros = %{version}-%{release} %description devel %{base_description} @@ -220,6 +224,14 @@ Requires: %{name}-devel%{?_isa} = %{version}-%{release} This package contains the static libraries. +%package srpm-macros +Summary: RPM macros for building source packages +BuildArch: noarch + +%description srpm-macros +RPM macros for building source packages. + + %prep %setup -q -c -T @@ -545,6 +557,12 @@ done # Get rid of generated CMake config rm -rf %{buildroot}%{_libdir}/cmake +# rpm macro +%global macrosdir %(d=%{_rpmconfigdir}/macros.d; [ -d $d ] || d=%{_sysconfdir}/rpm; echo $d) +mkdir -p %{buildroot}%{macrosdir} +# Avoid expansion of macros inside %%openblas_arches by catting the source again +echo %%openblas_arches $(cat %SOURCE1) > %{buildroot}%{macrosdir}/macros.%{name}-srpm + %post -p /sbin/ldconfig %postun -p /sbin/ldconfig @@ -630,6 +648,9 @@ rm -rf %{buildroot} %{_libdir}/lib%{name}p64_.so %endif +%files srpm-macros +%{macrosdir}/macros.%{name}-srpm + %files Rblas %{_libdir}/R/lib/libRblas.so @@ -647,6 +668,9 @@ rm -rf %{buildroot} %endif %changelog +* Wed Mar 15 2017 Orion Poplawski - 0.2.19-8 +- Define %%openblas_arches for dependent packages to use + * Mon Feb 13 2017 Björn Esser - 0.2.19-7 - Upgrade Patch4 to hopefully fully fix the issues on PPC64LE diff --git a/openblas_arches b/openblas_arches new file mode 100644 index 0000000..82e1f3f --- /dev/null +++ b/openblas_arches @@ -0,0 +1 @@ +x86_64 %{ix86} armv7hl %{power64} aarch64 From bdce7d2e6bdbdb7aa9c4a6317c08c44812317a6e Mon Sep 17 00:00:00 2001 From: Orion Poplawski Date: Mon, 20 Mar 2017 16:12:59 -0600 Subject: [PATCH 3/9] Move openblas-srpm-macros to separate package --- openblas.spec | 24 ++++-------------------- openblas_arches | 1 - 2 files changed, 4 insertions(+), 21 deletions(-) delete mode 100644 openblas_arches diff --git a/openblas.spec b/openblas.spec index 17b6bdf..6418ec0 100644 --- a/openblas.spec +++ b/openblas.spec @@ -15,14 +15,12 @@ Name: openblas Version: 0.2.19 -Release: 8%{?dist} +Release: 9%{?dist} Summary: An optimized BLAS library based on GotoBLAS2 Group: Development/Libraries License: BSD URL: https://github.com/xianyi/OpenBLAS/ Source0: https://github.com/xianyi/OpenBLAS/archive/v%{version}.tar.gz -# This contains the list of arches that openblas works on -Source1: openblas_arches # Use system lapack Patch0: openblas-0.2.15-system_lapack.patch # Drop extra p from threaded library name @@ -93,7 +91,6 @@ BuildRequires: lapack64-static # Upstream supports the package only on these architectures. # Runtime processor detection is not available on other archs. -%global openblas_arches %(cat %SOURCE1) ExclusiveArch: %{openblas_arches} %global base_description \ @@ -224,13 +221,6 @@ Requires: %{name}-devel%{?_isa} = %{version}-%{release} This package contains the static libraries. -%package srpm-macros -Summary: RPM macros for building source packages -BuildArch: noarch - -%description srpm-macros -RPM macros for building source packages. 
- %prep %setup -q -c -T @@ -557,12 +547,6 @@ done # Get rid of generated CMake config rm -rf %{buildroot}%{_libdir}/cmake -# rpm macro -%global macrosdir %(d=%{_rpmconfigdir}/macros.d; [ -d $d ] || d=%{_sysconfdir}/rpm; echo $d) -mkdir -p %{buildroot}%{macrosdir} -# Avoid expansion of macros inside %%openblas_arches by catting the source again -echo %%openblas_arches $(cat %SOURCE1) > %{buildroot}%{macrosdir}/macros.%{name}-srpm - %post -p /sbin/ldconfig %postun -p /sbin/ldconfig @@ -648,9 +632,6 @@ rm -rf %{buildroot} %{_libdir}/lib%{name}p64_.so %endif -%files srpm-macros -%{macrosdir}/macros.%{name}-srpm - %files Rblas %{_libdir}/R/lib/libRblas.so @@ -668,6 +649,9 @@ rm -rf %{buildroot} %endif %changelog +* Mon Mar 20 2017 Orion Poplawski - 0.2.19-9 +- Move openblas-srpm-macros to separate package + * Wed Mar 15 2017 Orion Poplawski - 0.2.19-8 - Define %%openblas_arches for dependent packages to use diff --git a/openblas_arches b/openblas_arches deleted file mode 100644 index 82e1f3f..0000000 --- a/openblas_arches +++ /dev/null @@ -1 +0,0 @@ -x86_64 %{ix86} armv7hl %{power64} aarch64 From df7f8a1a6b4b1be1d31f8e5243b87b5ddaf10efe Mon Sep 17 00:00:00 2001 From: Orion Poplawski Date: Mon, 20 Mar 2017 21:02:56 -0600 Subject: [PATCH 4/9] Drop openblas-srpm-macros version requirement --- openblas.spec | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/openblas.spec b/openblas.spec index 6418ec0..83b3da8 100644 --- a/openblas.spec +++ b/openblas.spec @@ -15,7 +15,7 @@ Name: openblas Version: 0.2.19 -Release: 9%{?dist} +Release: 10%{?dist} Summary: An optimized BLAS library based on GotoBLAS2 Group: Development/Libraries License: BSD @@ -204,7 +204,7 @@ Requires: %{name}-openmp64_%{?_isa} = %{version}-%{release} Requires: %{name}-threads64_%{?_isa} = %{version}-%{release} Requires: %{name}-serial64_%{?_isa} = %{version}-%{release} %endif -Requires: %{name}-srpm-macros = %{version}-%{release} +Requires: %{name}-srpm-macros %description devel %{base_description} @@ -649,6 +649,9 @@ rm -rf %{buildroot} %endif %changelog +* Mon Mar 20 2017 Orion Poplawski - 0.2.19-10 +- Drop openblas-srpm-macros version requirement + * Mon Mar 20 2017 Orion Poplawski - 0.2.19-9 - Move openblas-srpm-macros to separate package From 2a39523bc3d49b6ba47895e4b5e47c56eb229598 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dan=20Hor=C3=A1k?= Date: Mon, 29 May 2017 16:04:28 +0200 Subject: [PATCH 5/9] - add generic s390x support (#1442048) --- openblas-0.2.19-s390x.patch | 4152 +++++++++++++++++++++++++++++++++++ openblas.spec | 11 +- 2 files changed, 4162 insertions(+), 1 deletion(-) create mode 100644 openblas-0.2.19-s390x.patch diff --git a/openblas-0.2.19-s390x.patch b/openblas-0.2.19-s390x.patch new file mode 100644 index 0000000..623637b --- /dev/null +++ b/openblas-0.2.19-s390x.patch @@ -0,0 +1,4152 @@ +From c4b61f74f18c674c69301122ba95bdbca6f55d0f Mon Sep 17 00:00:00 2001 +From: Zhang Xianyi +Date: Fri, 15 Apr 2016 18:02:24 -0400 +Subject: [PATCH 1/6] Init IBM z system (s390x) porting. 
+ +(cherry picked from commit dd43661cfd5d3de6e9fe804587b89f1094c85e41) +--- + Makefile.zarch | 6 ++ + c_check | 8 +++ + common.h | 4 ++ + common_linux.h | 4 +- + common_zarch.h | 139 ++++++++++++++++++++++++++++++++++++++ + cpuid_zarch.c | 91 +++++++++++++++++++++++++ + ctest.c | 4 ++ + getarch.c | 10 ++- + kernel/zarch/KERNEL | 30 ++++++++ + kernel/zarch/KERNEL.ZARCH_GENERIC | 134 ++++++++++++++++++++++++++++++++++++ + kernel/zarch/Makefile | 2 + + param.h | 39 +++++++++++ + 12 files changed, 467 insertions(+), 4 deletions(-) + create mode 100644 Makefile.zarch + create mode 100644 common_zarch.h + create mode 100644 cpuid_zarch.c + create mode 100644 kernel/zarch/KERNEL + create mode 100644 kernel/zarch/KERNEL.ZARCH_GENERIC + create mode 100644 kernel/zarch/Makefile + +diff --git a/Makefile.zarch b/Makefile.zarch +new file mode 100644 +index 00000000..138c5941 +--- /dev/null ++++ b/Makefile.zarch +@@ -0,0 +1,6 @@ ++ ++ifeq ($(CORE), Z13) ++CCOMMON_OPT += -march=z13 ++FCOMMON_OPT += -march=z13 ++endif ++ +diff --git a/c_check b/c_check +index 2ec9fc48..1bd52201 100644 +--- a/c_check ++++ b/c_check +@@ -10,6 +10,7 @@ $hostarch = "x86_64" if ($hostarch eq "amd64"); + $hostarch = "arm" if ($hostarch =~ /^arm.*/); + $hostarch = "arm64" if ($hostarch eq "aarch64"); + $hostarch = "power" if ($hostarch =~ /^(powerpc|ppc).*/); ++$hostarch = "zarch" if ($hostarch eq "s390x"); + + $tmpf = new File::Temp( UNLINK => 1 ); + $binary = $ENV{"BINARY"}; +@@ -72,6 +73,7 @@ $architecture = sparc if ($data =~ /ARCH_SPARC/); + $architecture = ia64 if ($data =~ /ARCH_IA64/); + $architecture = arm if ($data =~ /ARCH_ARM/); + $architecture = arm64 if ($data =~ /ARCH_ARM64/); ++$architecture = zarch if ($data =~ /ARCH_ZARCH/); + + $defined = 0; + +@@ -96,6 +98,11 @@ if (($architecture eq "arm") || ($architecture eq "arm64")) { + $defined = 1; + } + ++if ($architecture eq "zarch") { ++ $defined = 1; ++ $binary = 64; ++} ++ + if ($architecture eq "alpha") { + $defined = 1; + $binary = 64; +@@ -187,6 +194,7 @@ $architecture = sparc if ($data =~ /ARCH_SPARC/); + $architecture = ia64 if ($data =~ /ARCH_IA64/); + $architecture = arm if ($data =~ /ARCH_ARM/); + $architecture = arm64 if ($data =~ /ARCH_ARM64/); ++$architecture = zarch if ($data =~ /ARCH_ZARCH/); + + $binformat = bin32; + $binformat = bin64 if ($data =~ /BINARY_64/); +diff --git a/common.h b/common.h +index 480174c1..b4acada3 100644 +--- a/common.h ++++ b/common.h +@@ -420,6 +420,10 @@ please https://github.com/xianyi/OpenBLAS/issues/246 + #include "common_arm64.h" + #endif + ++#ifdef ARCH_ZARCH ++#include "common_zarch.h" ++#endif ++ + #ifndef ASSEMBLER + #ifdef OS_WINDOWS + typedef char env_var_t[MAX_PATH]; +diff --git a/common_linux.h b/common_linux.h +index cab5e5f7..35f3fb65 100644 +--- a/common_linux.h ++++ b/common_linux.h +@@ -70,7 +70,7 @@ extern long int syscall (long int __sysno, ...); + static inline int my_mbind(void *addr, unsigned long len, int mode, + unsigned long *nodemask, unsigned long maxnode, + unsigned flags) { +-#if defined (__LSB_VERSION__) ++#if defined (__LSB_VERSION__) || defined(ARCH_ZARCH) + // So far, LSB (Linux Standard Base) don't support syscall(). 
+ // https://lsbbugs.linuxfoundation.org/show_bug.cgi?id=3482 + return 0; +@@ -90,7 +90,7 @@ static inline int my_mbind(void *addr, unsigned long len, int mode, + } + + static inline int my_set_mempolicy(int mode, const unsigned long *addr, unsigned long flag) { +-#if defined (__LSB_VERSION__) ++#if defined (__LSB_VERSION__) || defined(ARCH_ZARCH) + // So far, LSB (Linux Standard Base) don't support syscall(). + // https://lsbbugs.linuxfoundation.org/show_bug.cgi?id=3482 + return 0; +diff --git a/common_zarch.h b/common_zarch.h +new file mode 100644 +index 00000000..7c04cf42 +--- /dev/null ++++ b/common_zarch.h +@@ -0,0 +1,139 @@ ++/***************************************************************************** ++Copyright (c) 2011-2016, The OpenBLAS Project ++All rights reserved. ++ ++Redistribution and use in source and binary forms, with or without ++modification, are permitted provided that the following conditions are ++met: ++ ++ 1. Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ ++ 2. Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ 3. Neither the name of the OpenBLAS project nor the names of ++ its contributors may be used to endorse or promote products ++ derived from this software without specific prior written ++ permission. ++ ++THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE ++LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ++DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ++OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE ++USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++**********************************************************************************/ ++ ++#ifndef COMMON_ZARCH ++#define COMMON_ZARCH ++ ++#define MB ++//__asm__ __volatile__ ("dmb ish" : : : "memory") ++#define WMB ++//__asm__ __volatile__ ("dmb ishst" : : : "memory") ++ ++ ++#define INLINE inline ++ ++#define RETURN_BY_COMPLEX ++ ++#ifndef ASSEMBLER ++ ++ /* ++static void __inline blas_lock(volatile BLASULONG *address){ ++ ++ BLASULONG ret; ++ ++ do { ++ while (*address) {YIELDING;}; ++ ++ __asm__ __volatile__( ++ "mov x4, #1 \n\t" ++ "1: \n\t" ++ "ldaxr x2, [%1] \n\t" ++ "cbnz x2, 1b \n\t" ++ "2: \n\t" ++ "stxr w3, x4, [%1] \n\t" ++ "cbnz w3, 1b \n\t" ++ "mov %0, #0 \n\t" ++ : "=r"(ret), "=r"(address) ++ : "1"(address) ++ : "memory", "x2" , "x3", "x4" ++ ++ ++ ); ++ ++ ++ } while (ret); ++ ++} ++ */ ++//#define BLAS_LOCK_DEFINED ++ ++ ++ ++static inline int blas_quickdivide(blasint x, blasint y){ ++ return x / y; ++} ++ ++#if defined(DOUBLE) ++#define GET_IMAGE(res) __asm__ __volatile__("str d1, %0" : "=m"(res) : : "memory") ++#else ++#define GET_IMAGE(res) __asm__ __volatile__("str s1, %0" : "=m"(res) : : "memory") ++#endif ++ ++#define GET_IMAGE_CANCEL ++ ++#endif ++ ++ ++#ifndef F_INTERFACE ++#define REALNAME ASMNAME ++#else ++#define REALNAME ASMFNAME ++#endif ++ ++#if defined(ASSEMBLER) && !defined(NEEDPARAM) ++ ++#define PROLOGUE \ ++ .text ;\ ++ .align 4 ;\ ++ .global REALNAME ;\ ++ .type REALNAME, %function ;\ ++REALNAME: ++ ++#define EPILOGUE ++ ++#define PROFCODE ++ ++#endif ++ ++ ++#define SEEK_ADDRESS ++ ++#ifndef PAGESIZE ++#define PAGESIZE ( 4 << 10) ++#endif ++#define HUGE_PAGESIZE ( 4 << 20) ++ ++#if defined(CORTEXA57) ++#define BUFFER_SIZE (20 << 20) ++#else ++#define BUFFER_SIZE (16 << 20) ++#endif ++ ++ ++#define BASE_ADDRESS (START_ADDRESS - BUFFER_SIZE * MAX_CPU_NUMBER) ++ ++#ifndef MAP_ANONYMOUS ++#define MAP_ANONYMOUS MAP_ANON ++#endif ++ ++#endif ++ +diff --git a/cpuid_zarch.c b/cpuid_zarch.c +new file mode 100644 +index 00000000..248cd47e +--- /dev/null ++++ b/cpuid_zarch.c +@@ -0,0 +1,91 @@ ++/************************************************************************** ++ Copyright (c) 2016, The OpenBLAS Project ++ All rights reserved. ++ Redistribution and use in source and binary forms, with or without ++ modification, are permitted provided that the following conditions are ++ met: ++ 1. Redistributions of source code must retain the above copyright ++ notice, this list of conditions and the following disclaimer. ++ 2. Redistributions in binary form must reproduce the above copyright ++ notice, this list of conditions and the following disclaimer in ++ the documentation and/or other materials provided with the ++ distribution. ++ 3. Neither the name of the OpenBLAS project nor the names of ++ its contributors may be used to endorse or promote products ++ derived from this software without specific prior written permission. ++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE ++ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ++ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ++ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE ++ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ *****************************************************************************/ ++ ++#include ++ ++#define CPU_GENERIC 0 ++#define CPU_Z13 1 ++ ++static char *cpuname[] = { ++ "ZARCH_GENERIC", ++ "Z13" ++}; ++ ++static char *cpuname_lower[] = { ++ "zarch_generic", ++ "z13" ++}; ++ ++int detect(void) ++{ ++ return CPU_GENERIC; ++} ++ ++void get_libname(void) ++{ ++ ++ int d = detect(); ++ printf("%s", cpuname_lower[d]); ++} ++ ++char *get_corename(void) ++{ ++ return cpuname[detect()]; ++} ++ ++void get_architecture(void) ++{ ++ printf("ZARCH"); ++} ++ ++void get_subarchitecture(void) ++{ ++ int d = detect(); ++ printf("%s", cpuname[d]); ++} ++ ++void get_subdirname(void) ++{ ++ printf("zarch"); ++} ++ ++ ++void get_cpuconfig(void) ++{ ++ ++ int d = detect(); ++ switch (d){ ++ case CPU_GENERIC: ++ printf("#define ZARCH_GENERIC\n"); ++ printf("#define DTB_DEFAULT_ENTRIES 64\n"); ++ break; ++ case CPU_Z13: ++ printf("#define Z13\n"); ++ printf("#define DTB_DEFAULT_ENTRIES 64\n"); ++ break; ++ } ++} +diff --git a/ctest.c b/ctest.c +index e0ef46e6..27d3b473 100644 +--- a/ctest.c ++++ b/ctest.c +@@ -105,6 +105,10 @@ ARCH_X86_64 + ARCH_POWER + #endif + ++#if defined(__s390x__) || defined(__zarch__) ++ARCH_ZARCH ++#endif ++ + #ifdef __mips64 + ARCH_MIPS64 + #endif +diff --git a/getarch.c b/getarch.c +index f8069e50..0d810e6c 100644 +--- a/getarch.c ++++ b/getarch.c +@@ -907,6 +907,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ #define OPENBLAS_SUPPORTED + #endif + ++#if defined(__zarch__) || defined(__s390x__) ++#define ZARCH ++#include "cpuid_zarch.c" ++#define OPENBLAS_SUPPORTED ++#endif ++ + #ifdef INTEL_AMD + #include "cpuid_x86.c" + #define OPENBLAS_SUPPORTED +@@ -1006,7 +1012,7 @@ int main(int argc, char *argv[]){ + #ifdef FORCE + printf("CORE=%s\n", CORENAME); + #else +-#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) ++#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) || defined(ZARCH) + printf("CORE=%s\n", get_corename()); + #endif + #endif +@@ -1113,7 +1119,7 @@ int main(int argc, char *argv[]){ + #ifdef FORCE + printf("#define CHAR_CORENAME \"%s\"\n", CORENAME); + #else +-#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) ++#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) || defined(ZARCH) + printf("#define CHAR_CORENAME \"%s\"\n", get_corename()); + #endif + #endif +diff --git a/kernel/zarch/KERNEL b/kernel/zarch/KERNEL +new file mode 100644 +index 00000000..68d68b5f +--- /dev/null ++++ b/kernel/zarch/KERNEL +@@ -0,0 +1,30 @@ ++ifndef SCABS_KERNEL ++SCABS_KERNEL = ../generic/cabs.c ++endif ++ ++ifndef DCABS_KERNEL ++DCABS_KERNEL = ../generic/cabs.c ++endif ++ ++ifndef QCABS_KERNEL ++QCABS_KERNEL = ../generic/cabs.c ++endif ++ ++ifndef LSAME_KERNEL ++LSAME_KERNEL = ../generic/lsame.c ++endif ++ ++ifndef SGEMM_BETA ++SGEMM_BETA = ../generic/gemm_beta.c ++endif ++ifndef DGEMM_BETA ++DGEMM_BETA = ../generic/gemm_beta.c ++endif ++ifndef CGEMM_BETA ++CGEMM_BETA = ../generic/zgemm_beta.c ++endif ++ifndef ZGEMM_BETA ++ZGEMM_BETA = ../generic/zgemm_beta.c ++endif ++ ++ +diff --git a/kernel/zarch/KERNEL.ZARCH_GENERIC b/kernel/zarch/KERNEL.ZARCH_GENERIC +new file mode 100644 +index 00000000..27157dad +--- /dev/null ++++ b/kernel/zarch/KERNEL.ZARCH_GENERIC +@@ -0,0 +1,134 @@ ++SAMAXKERNEL = ../arm/amax.c ++DAMAXKERNEL = ../arm/amax.c ++CAMAXKERNEL = ../arm/zamax.c ++ZAMAXKERNEL = ../arm/zamax.c ++ ++SAMINKERNEL = ../arm/amin.c ++DAMINKERNEL = ../arm/amin.c ++CAMINKERNEL = ../arm/zamin.c ++ZAMINKERNEL = ../arm/zamin.c ++ ++SMAXKERNEL = ../arm/max.c ++DMAXKERNEL = ../arm/max.c ++ ++SMINKERNEL = ../arm/min.c ++DMINKERNEL = ../arm/min.c ++ ++ISAMAXKERNEL = ../arm/iamax.c ++IDAMAXKERNEL = ../arm/iamax.c ++ICAMAXKERNEL = ../arm/izamax.c ++IZAMAXKERNEL = ../arm/izamax.c ++ ++ISAMINKERNEL = ../arm/iamin.c ++IDAMINKERNEL = ../arm/iamin.c ++ICAMINKERNEL = ../arm/izamin.c ++IZAMINKERNEL = ../arm/izamin.c ++ ++ISMAXKERNEL = ../arm/imax.c ++IDMAXKERNEL = ../arm/imax.c ++ ++ISMINKERNEL = ../arm/imin.c ++IDMINKERNEL = ../arm/imin.c ++ ++SASUMKERNEL = ../arm/asum.c ++DASUMKERNEL = ../arm/asum.c ++CASUMKERNEL = ../arm/zasum.c ++ZASUMKERNEL = ../arm/zasum.c ++ ++SAXPYKERNEL = ../arm/axpy.c ++DAXPYKERNEL = ../arm/axpy.c ++CAXPYKERNEL = ../arm/zaxpy.c ++ZAXPYKERNEL = ../arm/zaxpy.c ++ ++SCOPYKERNEL = ../arm/copy.c ++DCOPYKERNEL = ../arm/copy.c ++CCOPYKERNEL = ../arm/zcopy.c ++ZCOPYKERNEL = ../arm/zcopy.c ++ ++SDOTKERNEL = ../arm/dot.c ++DDOTKERNEL = ../arm/dot.c ++CDOTKERNEL = ../arm/zdot.c ++ZDOTKERNEL = ../arm/zdot.c ++ ++SNRM2KERNEL = ../arm/nrm2.c ++DNRM2KERNEL = ../arm/nrm2.c ++CNRM2KERNEL = ../arm/znrm2.c ++ZNRM2KERNEL = ../arm/znrm2.c ++ ++SROTKERNEL = ../arm/rot.c ++DROTKERNEL = ../arm/rot.c ++CROTKERNEL = ../arm/zrot.c ++ZROTKERNEL = ../arm/zrot.c ++ ++SSCALKERNEL = ../arm/scal.c ++DSCALKERNEL = 
../arm/scal.c ++CSCALKERNEL = ../arm/zscal.c ++ZSCALKERNEL = ../arm/zscal.c ++ ++SSWAPKERNEL = ../arm/swap.c ++DSWAPKERNEL = ../arm/swap.c ++CSWAPKERNEL = ../arm/zswap.c ++ZSWAPKERNEL = ../arm/zswap.c ++ ++SGEMVNKERNEL = ../arm/gemv_n.c ++DGEMVNKERNEL = ../arm/gemv_n.c ++CGEMVNKERNEL = ../arm/zgemv_n.c ++ZGEMVNKERNEL = ../arm/zgemv_n.c ++ ++SGEMVTKERNEL = ../arm/gemv_t.c ++DGEMVTKERNEL = ../arm/gemv_t.c ++CGEMVTKERNEL = ../arm/zgemv_t.c ++ZGEMVTKERNEL = ../arm/zgemv_t.c ++ ++STRMMKERNEL = ../generic/trmmkernel_2x2.c ++DTRMMKERNEL = ../generic/trmmkernel_2x2.c ++CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c ++ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c ++ ++SGEMMKERNEL = ../generic/gemmkernel_2x2.c ++SGEMMONCOPY = ../generic/gemm_ncopy_2.c ++SGEMMOTCOPY = ../generic/gemm_tcopy_2.c ++SGEMMONCOPYOBJ = sgemm_oncopy.o ++SGEMMOTCOPYOBJ = sgemm_otcopy.o ++ ++DGEMMKERNEL = ../generic/gemmkernel_2x2.c ++DGEMMONCOPY = ../generic/gemm_ncopy_2.c ++DGEMMOTCOPY = ../generic/gemm_tcopy_2.c ++DGEMMONCOPYOBJ = dgemm_oncopy.o ++DGEMMOTCOPYOBJ = dgemm_otcopy.o ++ ++CGEMMKERNEL = ../generic/zgemmkernel_2x2.c ++CGEMMONCOPY = ../generic/zgemm_ncopy_2.c ++CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c ++CGEMMONCOPYOBJ = cgemm_oncopy.o ++CGEMMOTCOPYOBJ = cgemm_otcopy.o ++ ++ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c ++ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c ++ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c ++ZGEMMONCOPYOBJ = zgemm_oncopy.o ++ZGEMMOTCOPYOBJ = zgemm_otcopy.o ++ ++STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c ++STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ++STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ++STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c ++ ++DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c ++DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ++DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ++DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c ++ ++CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c ++CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ++CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ++CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c ++ ++ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c ++ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ++ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ++ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c ++ ++ ++ ++ +diff --git a/kernel/zarch/Makefile b/kernel/zarch/Makefile +new file mode 100644 +index 00000000..efae70d7 +--- /dev/null ++++ b/kernel/zarch/Makefile +@@ -0,0 +1,2 @@ ++clean :: ++ +diff --git a/param.h b/param.h +index 480518cd..0268fb5e 100644 +--- a/param.h ++++ b/param.h +@@ -2509,6 +2509,45 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ #endif + + ++#if defined(ZARCH_GENERIC) ++#define SNUMOPT 2 ++#define DNUMOPT 2 ++ ++#define GEMM_DEFAULT_OFFSET_A 0 ++#define GEMM_DEFAULT_OFFSET_B 0 ++#define GEMM_DEFAULT_ALIGN 0x03fffUL ++ ++#define SGEMM_DEFAULT_UNROLL_M 2 ++#define SGEMM_DEFAULT_UNROLL_N 2 ++ ++#define DGEMM_DEFAULT_UNROLL_M 2 ++#define DGEMM_DEFAULT_UNROLL_N 2 ++ ++#define CGEMM_DEFAULT_UNROLL_M 2 ++#define CGEMM_DEFAULT_UNROLL_N 2 ++ ++#define ZGEMM_DEFAULT_UNROLL_M 2 ++#define ZGEMM_DEFAULT_UNROLL_N 2 ++ ++#define SGEMM_DEFAULT_P 128 ++#define DGEMM_DEFAULT_P 128 ++#define CGEMM_DEFAULT_P 96 ++#define ZGEMM_DEFAULT_P 64 ++ ++#define SGEMM_DEFAULT_Q 240 ++#define DGEMM_DEFAULT_Q 120 ++#define CGEMM_DEFAULT_Q 120 ++#define ZGEMM_DEFAULT_Q 120 ++ ++#define SGEMM_DEFAULT_R 12288 ++#define DGEMM_DEFAULT_R 8192 ++#define CGEMM_DEFAULT_R 4096 ++#define ZGEMM_DEFAULT_R 4096 ++ ++ ++#define SYMV_P 16 ++#endif ++ + + #ifdef GENERIC + +-- +2.12.2 + + +From f18efc365072feaedc5730b1a0153ab505b8deaa Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Dan=20Hor=C3=A1k?= +Date: Thu, 13 Apr 2017 12:21:10 +0200 +Subject: [PATCH 2/6] add lapack laswp for zarch + +(cherry picked from commit 56762d5e4c54428ef20e14610f1535a74e5ac701) +--- + lapack/laswp/zarch/Makefile | 8 ++++++++ + 1 file changed, 8 insertions(+) + create mode 100644 lapack/laswp/zarch/Makefile + +diff --git a/lapack/laswp/zarch/Makefile b/lapack/laswp/zarch/Makefile +new file mode 100644 +index 00000000..af1f0199 +--- /dev/null ++++ b/lapack/laswp/zarch/Makefile +@@ -0,0 +1,8 @@ ++TOPDIR = ../../.. ++include ../../../Makefile.system ++ ++LASWP = ../generic/laswp_k_1.c ++ZLASWP = ../generic/zlaswp_k_1.c ++ ++include ../generic/Makefile ++ +-- +2.12.2 + + +From d105ac97e1ad4455a76a7929a04a43267daa1191 Mon Sep 17 00:00:00 2001 +From: Abdurrauf +Date: Wed, 4 Jan 2017 19:32:33 +0400 +Subject: [PATCH 3/6] dtrmm and dgemm for z13 + +(cherry picked from commit 64186678180c08db3f43524082790394a00c5008) +--- + CONTRIBUTORS.md | 4 + + Makefile.zarch | 4 +- + README.md | 5 + + common_zarch.h | 3 +- + cpuid_zarch.c | 4 +- + kernel/zarch/KERNEL.Z13 | 141 ++++ + kernel/zarch/KERNEL.ZARCH_GENERIC | 1 - + kernel/zarch/gemm8x4V.S | 615 +++++++++++++++ + kernel/zarch/kernelMacros.S | 1529 +++++++++++++++++++++++++++++++++++++ + kernel/zarch/trmm8x4V.S | 877 +++++++++++++++++++++ + param.h | 40 + + 11 files changed, 3218 insertions(+), 5 deletions(-) + create mode 100644 kernel/zarch/KERNEL.Z13 + create mode 100644 kernel/zarch/gemm8x4V.S + create mode 100644 kernel/zarch/kernelMacros.S + create mode 100644 kernel/zarch/trmm8x4V.S + +diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md +index 5ecf32b9..0489599a 100644 +--- a/CONTRIBUTORS.md ++++ b/CONTRIBUTORS.md +@@ -161,3 +161,7 @@ In chronological order: + * Kaustubh Raste + * [2016-05-09] DTRSM optimization for MIPS P5600 and I6400 using MSA + * [2016-05-20] STRSM optimization for MIPS P5600 and I6400 using MSA ++ ++* Abdelrauf ++ * [2017-01-01] dgemm and dtrmm kernels for IBM z13 ++ +diff --git a/Makefile.zarch b/Makefile.zarch +index 138c5941..9ec9dc79 100644 +--- a/Makefile.zarch ++++ b/Makefile.zarch +@@ -1,6 +1,6 @@ + + ifeq ($(CORE), Z13) +-CCOMMON_OPT += -march=z13 +-FCOMMON_OPT += -march=z13 ++CCOMMON_OPT += -march=z13 -mzvector ++FCOMMON_OPT += -march=z13 -mzvector + endif + +diff --git a/README.md b/README.md +index ff55edaa..5428f0eb 100644 +--- a/README.md ++++ b/README.md +@@ -106,6 +106,11 @@ Please read GotoBLAS_01Readme.txt + - **ARMV8**: Experimental + - **ARM Cortex-A57**: Experimental + ++#### IBM zEnterprise System: ++- 
**Z13**: Double precision real number ++ git checkout z13 ++ make USE_TRMM=1 ++ + ### Support OS: + - **GNU/Linux** + - **MingWin or Visual Studio(CMake)/Windows**: Please read . +diff --git a/common_zarch.h b/common_zarch.h +index 7c04cf42..e105574e 100644 +--- a/common_zarch.h ++++ b/common_zarch.h +@@ -103,10 +103,11 @@ static inline int blas_quickdivide(blasint x, blasint y){ + + #define PROLOGUE \ + .text ;\ +- .align 4 ;\ ++ .align 256 ;\ + .global REALNAME ;\ + .type REALNAME, %function ;\ + REALNAME: ++ + + #define EPILOGUE + +diff --git a/cpuid_zarch.c b/cpuid_zarch.c +index 248cd47e..e2e3b046 100644 +--- a/cpuid_zarch.c ++++ b/cpuid_zarch.c +@@ -42,7 +42,9 @@ static char *cpuname_lower[] = { + + int detect(void) + { +- return CPU_GENERIC; ++ // return CPU_GENERIC; ++ return CPU_Z13; ++ + } + + void get_libname(void) +diff --git a/kernel/zarch/KERNEL.Z13 b/kernel/zarch/KERNEL.Z13 +new file mode 100644 +index 00000000..91885da8 +--- /dev/null ++++ b/kernel/zarch/KERNEL.Z13 +@@ -0,0 +1,141 @@ ++SAMAXKERNEL = ../arm/amax.c ++DAMAXKERNEL = ../arm/amax.c ++CAMAXKERNEL = ../arm/zamax.c ++ZAMAXKERNEL = ../arm/zamax.c ++ ++SAMINKERNEL = ../arm/amin.c ++DAMINKERNEL = ../arm/amin.c ++CAMINKERNEL = ../arm/zamin.c ++ZAMINKERNEL = ../arm/zamin.c ++ ++SMAXKERNEL = ../arm/max.c ++DMAXKERNEL = ../arm/max.c ++ ++SMINKERNEL = ../arm/min.c ++DMINKERNEL = ../arm/min.c ++ ++ISAMAXKERNEL = ../arm/iamax.c ++IDAMAXKERNEL = ../arm/iamax.c ++ICAMAXKERNEL = ../arm/izamax.c ++IZAMAXKERNEL = ../arm/izamax.c ++ ++ISAMINKERNEL = ../arm/iamin.c ++IDAMINKERNEL = ../arm/iamin.c ++ICAMINKERNEL = ../arm/izamin.c ++IZAMINKERNEL = ../arm/izamin.c ++ ++ISMAXKERNEL = ../arm/imax.c ++IDMAXKERNEL = ../arm/imax.c ++ ++ISMINKERNEL = ../arm/imin.c ++IDMINKERNEL = ../arm/imin.c ++ ++SASUMKERNEL = ../arm/asum.c ++DASUMKERNEL = ../arm/asum.c ++CASUMKERNEL = ../arm/zasum.c ++ZASUMKERNEL = ../arm/zasum.c ++ ++SAXPYKERNEL = ../arm/axpy.c ++DAXPYKERNEL = ../arm/axpy.c ++CAXPYKERNEL = ../arm/zaxpy.c ++ZAXPYKERNEL = ../arm/zaxpy.c ++ ++SCOPYKERNEL = ../arm/copy.c ++DCOPYKERNEL = ../arm/copy.c ++CCOPYKERNEL = ../arm/zcopy.c ++ZCOPYKERNEL = ../arm/zcopy.c ++ ++SDOTKERNEL = ../arm/dot.c ++DDOTKERNEL = ../arm/dot.c ++CDOTKERNEL = ../arm/zdot.c ++ZDOTKERNEL = ../arm/zdot.c ++ ++SNRM2KERNEL = ../arm/nrm2.c ++DNRM2KERNEL = ../arm/nrm2.c ++CNRM2KERNEL = ../arm/znrm2.c ++ZNRM2KERNEL = ../arm/znrm2.c ++ ++SROTKERNEL = ../arm/rot.c ++DROTKERNEL = ../arm/rot.c ++CROTKERNEL = ../arm/zrot.c ++ZROTKERNEL = ../arm/zrot.c ++ ++SSCALKERNEL = ../arm/scal.c ++DSCALKERNEL = ../arm/scal.c ++CSCALKERNEL = ../arm/zscal.c ++ZSCALKERNEL = ../arm/zscal.c ++ ++SSWAPKERNEL = ../arm/swap.c ++DSWAPKERNEL = ../arm/swap.c ++CSWAPKERNEL = ../arm/zswap.c ++ZSWAPKERNEL = ../arm/zswap.c ++ ++SGEMVNKERNEL = ../arm/gemv_n.c ++DGEMVNKERNEL = ../arm/gemv_n.c ++CGEMVNKERNEL = ../arm/zgemv_n.c ++ZGEMVNKERNEL = ../arm/zgemv_n.c ++ ++SGEMVTKERNEL = ../arm/gemv_t.c ++DGEMVTKERNEL = ../arm/gemv_t.c ++CGEMVTKERNEL = ../arm/zgemv_t.c ++ZGEMVTKERNEL = ../arm/zgemv_t.c ++ ++STRMMKERNEL = ../generic/trmmkernel_2x2.c ++DTRMMKERNEL = trmm8x4V.S ++CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c ++ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c ++ ++SGEMMKERNEL = ../generic/gemmkernel_2x2.c ++SGEMMONCOPY = ../generic/gemm_ncopy_2.c ++SGEMMOTCOPY = ../generic/gemm_tcopy_2.c ++SGEMMONCOPYOBJ = sgemm_oncopy.o ++SGEMMOTCOPYOBJ = sgemm_otcopy.o ++ ++ ++ ++DGEMMKERNEL = gemm8x4V.S ++DGEMMINCOPY = ../generic/gemm_ncopy_8.c ++DGEMMITCOPY = ../generic/gemm_tcopy_8.c ++DGEMMONCOPY = ../generic/gemm_ncopy_4.c 
++DGEMMOTCOPY = ../generic/gemm_tcopy_4.c ++DGEMMINCOPYOBJ = dgemm_incopy.o ++DGEMMITCOPYOBJ = dgemm_itcopy.o ++DGEMMONCOPYOBJ = dgemm_oncopy.o ++DGEMMOTCOPYOBJ = dgemm_otcopy.o ++ ++CGEMMKERNEL = ../generic/zgemmkernel_2x2.c ++CGEMMONCOPY = ../generic/zgemm_ncopy_2.c ++CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c ++CGEMMONCOPYOBJ = cgemm_oncopy.o ++CGEMMOTCOPYOBJ = cgemm_otcopy.o ++ ++ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c ++ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c ++ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c ++ZGEMMONCOPYOBJ = zgemm_oncopy.o ++ZGEMMOTCOPYOBJ = zgemm_otcopy.o ++ ++STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c ++STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ++STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ++STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c ++ ++DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c ++DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ++DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ++DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c ++ ++CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c ++CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ++CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ++CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c ++ ++ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c ++ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ++ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ++ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c ++ ++ ++ ++ ++ +diff --git a/kernel/zarch/KERNEL.ZARCH_GENERIC b/kernel/zarch/KERNEL.ZARCH_GENERIC +index 27157dad..d80f84e7 100644 +--- a/kernel/zarch/KERNEL.ZARCH_GENERIC ++++ b/kernel/zarch/KERNEL.ZARCH_GENERIC +@@ -131,4 +131,3 @@ ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + + + +- +diff --git a/kernel/zarch/gemm8x4V.S b/kernel/zarch/gemm8x4V.S +new file mode 100644 +index 00000000..0b4bc73c +--- /dev/null ++++ b/kernel/zarch/gemm8x4V.S +@@ -0,0 +1,615 @@ ++/*************************************************************************** ++Copyright (c) 2013-2017, The OpenBLAS Project ++All rights reserved. ++Redistribution and use in source and binary forms, with or without ++modification, are permitted provided that the following conditions are ++met: ++1. Redistributions of source code must retain the above copyright ++notice, this list of conditions and the following disclaimer. ++2. Redistributions in binary form must reproduce the above copyright ++notice, this list of conditions and the following disclaimer in ++the documentation and/or other materials provided with the ++distribution. ++3. Neither the name of the OpenBLAS project nor the names of ++its contributors may be used to endorse or promote products ++derived from this software without specific prior written permission. ++THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE ++LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ++DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ++OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE ++USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++*****************************************************************************/ ++ ++/************************************************************************************** ++* 2017/01/01 AbdelRauf (quickwritereader@gmail.com) ++* BLASTEST : OK ++* CTEST : OK ++* TEST : OK ++**************************************************************************************/ ++ ++/*********************************************************************/ ++/* Copyright 2009, 2010 The University of Texas at Austin. */ ++/* All rights reserved. */ ++/* */ ++/* Redistribution and use in source and binary forms, with or */ ++/* without modification, are permitted provided that the following */ ++/* conditions are met: */ ++/* */ ++/* 1. Redistributions of source code must retain the above */ ++/* copyright notice, this list of conditions and the following */ ++/* disclaimer. */ ++/* */ ++/* 2. Redistributions in binary form must reproduce the above */ ++/* copyright notice, this list of conditions and the following */ ++/* disclaimer in the documentation and/or other materials */ ++/* provided with the distribution. */ ++/* */ ++/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ ++/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ ++/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ ++/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ ++/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ ++/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ ++/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ ++/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ ++/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ ++/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ ++/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ ++/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ ++/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ ++/* POSSIBILITY OF SUCH DAMAGE. */ ++/* */ ++/* The views and conclusions contained in the software and */ ++/* documentation are those of the authors and should not be */ ++/* interpreted as representing official policies, either expressed */ ++/* or implied, of The University of Texas at Austin. */ ++/*********************************************************************/ ++ ++#define ASSEMBLER ++#include "common.h" ++ ++/************** Notes ON IBM abi and IBM assembly********************************************** ++* General registers r0 and r1 should be used internally whenever possible ++* General registers r2 to r5 should be second choice ++* General registers r12 to r15 should only be used for their standard function. 
++* r0 should not be used as address disp register ++ ++#BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc ++ ##bm=r2,bn=r3, bk=r4, alpha=f0,ba=r5,bb=r6,stack[160] ,ldc=stack[168] ++**********************************************************************************************/ ++ ++ ++#define BM %r2 ++#define BM_CUR %r0 ++#define BN %r3 ++#define BN_CUR %r10 ++#define BK %r4 ++#define LDC_BYTE %r8 ++#define ALPHA %f0 ++#define ALPHA_VECT %v0 ++#define LOCAL_VAR1 %r9 ++#define LOCAL_VAR2 %r1 ++#define LOCAL_VAR3 %r11 ++#define A %r5 ++#define B %r6 ++#define CIJ %r7 ++#define CIJ_LOCAL %r12 ++#define ALIGN_4 .align 16 ++#define ALIGN_2 .align 8 ++#define PREFETCH_INS 1 ++ ++#include "kernelMacros.S" ++ ++/***********************************DGEMM***********************************************************/ ++ ++PROLOGUE ++ ++stmg %r6,%r12,40(%r15) ++lg CIJ, 160(%r15) ++lg LOCAL_VAR1, 168(%r15) ++srlg BN_CUR,BN,2 ++vrepg ALPHA_VECT,ALPHA_VECT,0 /*replicate alpha which in f0*/ ++sllg LDC_BYTE, LOCAL_VAR1,3 /*calculate lcd stride with bytes double=8 x<<3 */ ++cijle BN_CUR,0,.LX2 ++ ++ALIGN_4 ++.LX4_BN: ++#if defined(PREFETCH_INS) ++ pfd 1, 0(A) ++ pfd 1, 256(A) ++ pfd 1, 0(B) ++ pfd 1, 256(B) ++#endif ++srlg BM_CUR,BM,3 ++lgr LOCAL_VAR3,A ++lgr CIJ_LOCAL,CIJ ++cijle BM_CUR,0,.L4x4 ++ ++ALIGN_4 ++.L8x4_BM: /*BM_CUR LOOP */ ++ ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_8x4 ++cijle LOCAL_VAR1,0,.L8x4_mod ++ ++ALIGN_4 ++.L8x4_4_BK: /*BK_CUR LOOP */ ++#if defined(PREFETCH_INS) ++ pfd 1, 512(LOCAL_VAR3) ++#endif ++ CALC_8x4_4 LOCAL_VAR3,LOCAL_VAR2 ++#if defined(PREFETCH_INS) ++ pfd 1, 512(LOCAL_VAR2) ++#endif ++brctg LOCAL_VAR1,.L8x4_4_BK ++ ++ALIGN_4 ++.L8x4_mod: ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L8x4_BK_Store ++ ++ALIGN_4 ++.L8x4_BK: /*BK_CUR LOOP */ ++ CALC_8x4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L8x4_BK ++ ++ALIGN_4 ++.L8x4_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_8x4 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE ++ ++brctg BM_CUR,.L8x4_BM ++ ++ALIGN_4 ++.L4x4: ++ ++tmll BM,4 ++jz .L2x4 ++ ++ALIGN_4 ++.L4x4_BM: /*BM start*/ ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_4x4 ++cijle LOCAL_VAR1,0,.L4x4_mod ++ ++ALIGN_4 ++.L4x4_4_BK: /*BK_CUR LOOP */ ++ CALC_4x4_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x4_4_BK ++ ++ALIGN_4 ++.L4x4_mod: ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L4x4_BK_Store ++ ++ALIGN_4 ++.L4x4_BK: /*BK_CUR LOOP */ ++ CALC_4x4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x4_BK ++ ++ALIGN_4 ++.L4x4_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_4x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++ ++ALIGN_2 ++.L2x4: ++ ++tmll BM,2 ++jz .L1x4 ++ ++ALIGN_4 ++.L2x4_BM: /*BM start*/ ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_2x4 ++cijle LOCAL_VAR1,0,.L2x4_mod ++ ++ALIGN_4 ++.L2x4_4_BK: /*BK_CUR LOOP */ ++ CALC_2x4_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x4_4_BK ++ ++ALIGN_4 ++.L2x4_mod: ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L2x4_BK_Store ++ ++ALIGN_4 ++.L2x4_BK: /*BK_CUR LOOP */ ++ CALC_2x4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x4_BK ++ ++ALIGN_4 ++.L2x4_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_2x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++ ++ ++ALIGN_4 ++.L1x4: ++ ++tmll BM,1 ++jz .Lx4_INNER_END ++ ++ALIGN_4 ++.L1x4_BM: /*BM start*/ ++srlg LOCAL_VAR1,BK,2 
/*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_1x4 ++cijle LOCAL_VAR1,0,.L1x4_mod ++ ++ALIGN_4 ++.L1x4_4_BK: /*BK_CUR LOOP */ ++ CALC_1x4_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x4_4_BK ++ ++ALIGN_4 ++.L1x4_mod: ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L1x4_BK_Store ++ ++ALIGN_4 ++.L1x4_BK: /*BK_CUR LOOP */ ++ CALC_1x4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x4_BK ++ ++ALIGN_4 ++.L1x4_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_1x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++ ++ALIGN_2 ++.Lx4_INNER_END: ++ ++/*add LDC_BYTE_COPY to new*/ ++sllg LOCAL_VAR1,LDC_BYTE,2 /*multiply*4 */ ++sllg LOCAL_VAR2,BK,5 /*muyliply*4*sizeof(double) =multiply*32* 2**5 */ ++la CIJ,0(CIJ,LOCAL_VAR1) /*refresh CIJ=CIJ+LDC_BYTE*4*/ ++la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*4*sizeof(double) */ ++ ++brctg BN_CUR,.LX4_BN ++ ++/*********************************X2 SECTION************************************************/ ++ALIGN_4 ++.LX2: ++tmll BN,2 ++jz .Lx1 ++ ++ALIGN_4 ++.Lx2_BN: ++srlg BM_CUR,BM,3 ++lgr LOCAL_VAR3,A ++lgr CIJ_LOCAL,CIJ ++cijle BM_CUR,0,.L4x2 ++ ++ ++ALIGN_4 ++.L8x2_BM: /*BM_CUR LOOP */ ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_8x2 ++cijle LOCAL_VAR1,0,.L8x2_mod ++ ++ALIGN_4 ++.L8x2_4_BK: /*BK_CUR LOOP */ ++#if defined(PREFETCH_INS) ++ pfd 1, 256(LOCAL_VAR3) ++ pfd 1,64(LOCAL_VAR2) ++#endif ++ CALC_8x2_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L8x2_4_BK ++ ++ALIGN_4 ++.L8x2_mod: ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L8x2_BK_Store ++ ++ALIGN_4 ++.L8x2_BK: /*BK_CUR LOOP */ ++ CALC_8x2 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L8x2_BK ++ ++ALIGN_4 ++.L8x2_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_8x2 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE ++ ++ALIGN_4 ++brctg BM_CUR,.L8x2_BM ++ ++ALIGN_2 ++.L4x2: ++ ++tmll BM,4 ++jz .L2x2 ++ ++ALIGN_4 ++.L4x2_BM: /*BM start*/ ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_4x2 ++cijle LOCAL_VAR1,0,.L4x2_mod ++ ++ALIGN_4 ++.L4x2_4_BK: /*BK_CUR LOOP */ ++ CALC_4x2_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x2_4_BK ++ ++ALIGN_4 ++.L4x2_mod: ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L4x2_BK_Store ++ ++ALIGN_4 ++.L4x2_BK: /*BK_CUR LOOP */ ++ CALC_4x2 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x2_BK ++ ++ALIGN_4 ++.L4x2_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_4x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++ ++ALIGN_2 ++.L2x2: ++ ++tmll BM,2 ++jz .L1x2 ++ ++ALIGN_4 ++.L2x2_BM: /*BM start*/ ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_2x2 ++cijle LOCAL_VAR1,0,.L2x2_mod ++ ++ALIGN_4 ++.L2x2_4_BK: /*BK_CUR LOOP */ ++ CALC_2x2_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x2_4_BK ++ ++ALIGN_4 ++.L2x2_mod: ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L2x2_BK_Store ++ ++ALIGN_4 ++.L2x2_BK: /*BK_CUR LOOP */ ++ CALC_2x2 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x2_BK ++ ++ALIGN_4 ++.L2x2_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_2x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++ ++ ++ALIGN_2 ++.L1x2: ++ ++tmll BM,1 ++jz .Lx2_INNER_END ++ ++ALIGN_4 ++.L1x2_BM: /*BM start*/ ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_1x2 ++cijle LOCAL_VAR1,0,.L1x2_mod ++ ++ALIGN_4 ++.L1x2_4_BK: /*BK_CUR LOOP */ ++ CALC_1x2_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x2_4_BK ++ ++ALIGN_4 ++.L1x2_mod: ++la 
LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L1x2_BK_Store ++ ++ALIGN_4 ++.L1x2_BK: /*BK_CUR LOOP */ ++ CALC_1x2 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x2_BK ++ ++ALIGN_4 ++.L1x2_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_1x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++ ++ALIGN_2 ++.Lx2_INNER_END: ++/*add LDC_BYTE_COPY to new*/ ++la LOCAL_VAR1,0(LDC_BYTE,LDC_BYTE) /*multiply*2 */ ++sllg LOCAL_VAR2,BK,4 /*muyliply*2*sizeof(double) =multiply*16* 2**4 */ ++la CIJ,0(CIJ,LOCAL_VAR1) /*refresh CIJ=CIJ+LDC_BYTE*4*/ ++la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*4*sizeof(double) */ ++ ++ ++ ++ ++/*********************************X1 SECTION************************************************/ ++ALIGN_2 ++.Lx1: ++tmll BN,1 ++jz .L_FUNC_END ++ ++ALIGN_4 ++.Lx1_BN: ++srlg BM_CUR,BM,3 ++lgr LOCAL_VAR3,A ++lgr CIJ_LOCAL,CIJ ++cijle BM_CUR,0,.L4x1 ++ ++ ++ALIGN_4 ++.L8x1_BM: /*BM_CUR LOOP */ ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_8x1 ++cijle LOCAL_VAR1,0,.L8x1_mod ++ ++ALIGN_4 ++.L8x1_4_BK: /*BK_CUR LOOP */ ++#if defined(PREFETCH_INS) ++ pfd 1, 256(LOCAL_VAR3) ++#endif ++ CALC_8x1_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L8x1_4_BK ++ ++ALIGN_4 ++.L8x1_mod: ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L8x1_BK_Store ++ ++ALIGN_4 ++.L8x1_BK: /*BK_CUR LOOP */ ++ CALC_8x1 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L8x1_BK ++ ++ALIGN_4 ++.L8x1_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_8x1 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE ++ ++ALIGN_4 ++brctg BM_CUR,.L8x1_BM ++ ++ALIGN_2 ++.L4x1: ++ ++tmll BM,4 ++jz .L2x1 ++ ++ALIGN_4 ++.L4x1_BM: /*BM start*/ ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_4x1 ++cijle LOCAL_VAR1,0,.L4x1_mod ++ ++ALIGN_4 ++.L4x1_4_BK: /*BK_CUR LOOP */ ++ CALC_4x1_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x1_4_BK ++ ++ALIGN_4 ++.L4x1_mod: ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L4x1_BK_Store ++ ++ALIGN_4 ++.L4x1_BK: /*BK_CUR LOOP */ ++ CALC_4x1 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x1_BK ++ ++ALIGN_4 ++.L4x1_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_4x1 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++ ++ALIGN_2 ++.L2x1: ++ ++tmll BM,2 ++jz .L1x1 ++ ++ALIGN_4 ++.L2x1_BM: /*BM start*/ ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_2x1 ++cijle LOCAL_VAR1,0,.L2x1_mod ++ ++ALIGN_4 ++.L2x1_4_BK: /*BK_CUR LOOP */ ++ CALC_2x1_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x1_4_BK ++ ++ALIGN_4 ++.L2x1_mod: ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L2x1_BK_Store ++ ++ALIGN_4 ++.L2x1_BK: /*BK_CUR LOOP */ ++ CALC_2x1 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x1_BK ++ ++ALIGN_4 ++.L2x1_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_2x1 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++ ++ ++ALIGN_2 ++.L1x1: ++ ++tmll BM, 1 ++jz .Lx1_INNER_END ++ ++ALIGN_4 ++.L1x1_BM: /*BM start*/ ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++ZERO_CVEC_1x1 ++cijle LOCAL_VAR1,0,.L1x1_mod ++ ++ALIGN_4 ++.L1x1_4_BK: /*BK_CUR LOOP */ ++ CALC_1x1_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x1_4_BK ++ ++ALIGN_4 ++.L1x1_mod: ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++jz .L1x1_BK_Store ++ ++ALIGN_4 ++.L1x1_BK: /*BK_CUR LOOP */ ++ CALC_1x1 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x1_BK ++ ++ALIGN_4 ++.L1x1_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ 
++STORE_1x1 ALPHA ,CIJ_LOCAL, LDC_BYTE ++ ++ALIGN_2 ++.Lx1_INNER_END: ++/*add LDC_BYTE_COPY to new*/ ++sllg LOCAL_VAR2,BK,3 /*muyliply*2*sizeof(double) =multiply*8* 2**3 */ ++la CIJ,0(CIJ,LDC_BYTE) /*refresh CIJ=CIJ+LDC_BYTE */ ++la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*1*sizeof(double) */ ++ ++ ++ALIGN_2 ++.L_FUNC_END: ++/*end*/ ++lmg %r6,%r12,40(%r15) ++br %r14 ++.end ++ ++ ++ ++ +diff --git a/kernel/zarch/kernelMacros.S b/kernel/zarch/kernelMacros.S +new file mode 100644 +index 00000000..cac4cb3d +--- /dev/null ++++ b/kernel/zarch/kernelMacros.S +@@ -0,0 +1,1529 @@ ++/*********************************KERNEL 8x4***********************************************/ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_8x4 ++ vzero %v16 ++ vzero %v17 ++ vzero %v18 ++ vzero %v19 ++ vzero %v20 ++ vzero %v21 ++ vzero %v22 ++ vzero %v23 ++ vzero %v24 ++ vzero %v25 ++ vzero %v26 ++ vzero %v27 ++ vzero %v28 ++ vzero %v29 ++ vzero %v30 ++ vzero %v31 ++.endm ++ ++/*Calculate for 8x4 C blocks*/ ++.macro CALC_8x4 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vlrepg %v1,8(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ vl %v4, 32(\PTR_A_REG) ++ vl %v5, 48(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vlrepg %v7,16(\PTR_B_REG) ++ vfmadb %v22,%v4,%v1,%v22 ++ vfmadb %v23,%v5,%v1,%v23 ++ vlrepg %v1,24(\PTR_B_REG) ++ vfmadb %v24,%v2,%v7,%v24 ++ vfmadb %v25,%v3,%v7,%v25 ++ vfmadb %v26,%v4,%v7,%v26 ++ la \PTR_A_REG, 64(\PTR_A_REG) ++ vfmadb %v27,%v5,%v7,%v27 ++ vfmadb %v28,%v2,%v1,%v28 ++ vfmadb %v29,%v3,%v1,%v29 ++ la \PTR_B_REG, 32(\PTR_B_REG) ++ vfmadb %v30,%v4,%v1,%v30 ++ vfmadb %v31,%v5,%v1,%v31 ++.endm ++ ++/*Calculate for 8x4_4 C blocks*/ ++.macro CALC_8x4_4 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vlrepg %v1,8(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ vl %v4, 32(\PTR_A_REG) ++ vl %v5, 48(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vlrepg %v7,16(\PTR_B_REG) ++ vfmadb %v22,%v4,%v1,%v22 ++ vfmadb %v23,%v5,%v1,%v23 ++ vlrepg %v1,24(\PTR_B_REG) ++ vfmadb %v24,%v2,%v7,%v24 ++ vfmadb %v25,%v3,%v7,%v25 ++ vfmadb %v26,%v4,%v7,%v26 ++ vfmadb %v27,%v5,%v7,%v27 ++ vfmadb %v28,%v2,%v1,%v28 ++ vfmadb %v29,%v3,%v1,%v29 ++ vfmadb %v30,%v4,%v1,%v30 ++ vfmadb %v31,%v5,%v1,%v31 ++ ++ vlrepg %v7, 32(\PTR_B_REG) ++ vlrepg %v1,40(\PTR_B_REG) ++ vl %v2, 64(\PTR_A_REG) ++ vl %v3, 80(\PTR_A_REG) ++ vl %v4, 96(\PTR_A_REG) ++ vl %v5, 112(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vlrepg %v7,48(\PTR_B_REG) ++ vfmadb %v22,%v4,%v1,%v22 ++ vfmadb %v23,%v5,%v1,%v23 ++ vlrepg %v1,56(\PTR_B_REG) ++ vfmadb %v24,%v2,%v7,%v24 ++ vfmadb %v25,%v3,%v7,%v25 ++ vfmadb %v26,%v4,%v7,%v26 ++ vfmadb %v27,%v5,%v7,%v27 ++ vfmadb %v28,%v2,%v1,%v28 ++ vfmadb %v29,%v3,%v1,%v29 ++ vfmadb %v30,%v4,%v1,%v30 ++ vfmadb %v31,%v5,%v1,%v31 ++ ++ vlrepg %v7, 64(\PTR_B_REG) ++ vlrepg %v1,72(\PTR_B_REG) ++ vl %v2, 128(\PTR_A_REG) ++ vl %v3, 144(\PTR_A_REG) ++ vl %v4, 160(\PTR_A_REG) ++ vl %v5, 176(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vlrepg %v7,80(\PTR_B_REG) ++ 
vfmadb %v22,%v4,%v1,%v22 ++ vfmadb %v23,%v5,%v1,%v23 ++ vlrepg %v1,88(\PTR_B_REG) ++ vfmadb %v24,%v2,%v7,%v24 ++ vfmadb %v25,%v3,%v7,%v25 ++ vfmadb %v26,%v4,%v7,%v26 ++ vfmadb %v27,%v5,%v7,%v27 ++ vfmadb %v28,%v2,%v1,%v28 ++ vfmadb %v29,%v3,%v1,%v29 ++ vfmadb %v30,%v4,%v1,%v30 ++ vfmadb %v31,%v5,%v1,%v31 ++ ++ vlrepg %v7, 96(\PTR_B_REG) ++ vlrepg %v1,104(\PTR_B_REG) ++ vl %v2, 192(\PTR_A_REG) ++ vl %v3, 208(\PTR_A_REG) ++ vl %v4, 224(\PTR_A_REG) ++ vl %v5, 240(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vlrepg %v7,112(\PTR_B_REG) ++ vfmadb %v22,%v4,%v1,%v22 ++ vfmadb %v23,%v5,%v1,%v23 ++ vlrepg %v1,120(\PTR_B_REG) ++ vfmadb %v24,%v2,%v7,%v24 ++ vfmadb %v25,%v3,%v7,%v25 ++ vfmadb %v26,%v4,%v7,%v26 ++ vfmadb %v27,%v5,%v7,%v27 ++ la \PTR_B_REG, 128(\PTR_B_REG) ++ vfmadb %v28,%v2,%v1,%v28 ++ vfmadb %v29,%v3,%v1,%v29 ++ vfmadb %v30,%v4,%v1,%v30 ++ la \PTR_A_REG, 256(\PTR_A_REG) ++ vfmadb %v31,%v5,%v1,%v31 ++ ++.endm ++ ++ ++/*STORE C8X4*/ ++.macro STORE_8x4 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ ++ /*add LDC_BYTE_reg=LDC_BYTE_original<<1 */ ++ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) ++ vl %v1,0(\CIJ_REG) ++ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG) ++ ++ vl %v2,16(\CIJ_REG) ++ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG) ++ ++ vl %v3,32(\CIJ_REG) ++ vfmadb %v3,%v18,\ALPHA_VECREG,%v3 ++ vst %v3,32(\CIJ_REG) ++ ++ vl %v4,48(\CIJ_REG) ++ vfmadb %v4,%v19,\ALPHA_VECREG,%v4 ++ vst %v4,48(\CIJ_REG) ++ ++ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) ++ ++ ++ /*add c LDC_BYTE*/ ++ vl %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v1,%v20,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ vl %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v2,%v21,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ ++ vl %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v3,%v22,\ALPHA_VECREG,%v3 ++ vst %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ vl %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v4,%v23,\ALPHA_VECREG,%v4 ++ vst %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ ++ vl %v1,0(\CIJ_REG,LOCAL_VAR1) ++ vfmadb %v1,%v24,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG,LOCAL_VAR1) ++ ++ vl %v2,16(\CIJ_REG,LOCAL_VAR1) ++ vfmadb %v2,%v25,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG,LOCAL_VAR1) ++ ++ vl %v3,32(\CIJ_REG,LOCAL_VAR1) ++ vfmadb %v3,%v26,\ALPHA_VECREG,%v3 ++ vst %v3,32(\CIJ_REG,LOCAL_VAR1) ++ ++ vl %v4,48(\CIJ_REG,LOCAL_VAR1) ++ vfmadb %v4,%v27,\ALPHA_VECREG,%v4 ++ vst %v4,48(\CIJ_REG,LOCAL_VAR1) ++ ++ ++ vl %v1,0(\CIJ_REG,LOCAL_VAR2) ++ vfmadb %v1,%v28,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG,LOCAL_VAR2) ++ ++ vl %v2,16(\CIJ_REG,LOCAL_VAR2) ++ vfmadb %v2,%v29,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG,LOCAL_VAR2) ++ ++ vl %v3,32(\CIJ_REG,LOCAL_VAR2) ++ vfmadb %v3,%v30,\ALPHA_VECREG,%v3 ++ vst %v3,32(\CIJ_REG,LOCAL_VAR2) ++ ++ vl %v4,48(\CIJ_REG,LOCAL_VAR2) ++ vfmadb %v4,%v31,\ALPHA_VECREG,%v4 ++ vst %v4,48(\CIJ_REG,LOCAL_VAR2) ++ ++ la \CIJ_REG,64(\CIJ_REG) ++ ++.endm ++ ++/*STORE TRMM C8X4*/ ++.macro STORE_TRMM_8x4 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ ++ /*add LDC_BYTE_reg=LDC_BYTE_original<<1 */ ++ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) ++ vfmdb %v1,%v16,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG) ++ ++ vfmdb %v2,%v17,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG) ++ vfmdb %v3,%v18,\ALPHA_VECREG ++ vst %v3,32(\CIJ_REG) ++ vfmdb %v4,%v19,\ALPHA_VECREG ++ vst %v4,48(\CIJ_REG) ++ ++ la 
LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) ++ ++ /*add c LDC_BYTE*/ ++ vfmdb %v1,%v20,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmdb %v2,%v21,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ vfmdb %v3,%v22,\ALPHA_VECREG ++ vst %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmdb %v4,%v23,\ALPHA_VECREG ++ vst %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ vfmdb %v1,%v24,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG,LOCAL_VAR1) ++ vfmdb %v2,%v25,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG,LOCAL_VAR1) ++ vfmdb %v3,%v26,\ALPHA_VECREG ++ vst %v3,32(\CIJ_REG,LOCAL_VAR1) ++ vfmdb %v4,%v27,\ALPHA_VECREG ++ vst %v4,48(\CIJ_REG,LOCAL_VAR1) ++ ++ vfmdb %v1,%v28,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG,LOCAL_VAR2) ++ vfmdb %v2,%v29,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG,LOCAL_VAR2) ++ vfmdb %v3,%v30,\ALPHA_VECREG ++ vst %v3,32(\CIJ_REG,LOCAL_VAR2) ++ vfmdb %v4,%v31,\ALPHA_VECREG ++ vst %v4,48(\CIJ_REG,LOCAL_VAR2) ++ la \CIJ_REG,64(\CIJ_REG) ++ ++.endm ++/**************************************Kernel4x4*************************************************/ ++ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_4x4 ++ vzero %v16 ++ vzero %v17 ++ vzero %v20 ++ vzero %v21 ++ vzero %v24 ++ vzero %v25 ++ vzero %v28 ++ vzero %v29 ++.endm ++ ++/*Calculate for 4x4 C blocks*/ ++.macro CALC_4x4 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vlrepg %v1,8(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vlrepg %v7,16(\PTR_B_REG) ++ vlrepg %v1,24(\PTR_B_REG) ++ vfmadb %v24,%v2,%v7,%v24 ++ vfmadb %v25,%v3,%v7,%v25 ++ la \PTR_A_REG, 32(\PTR_A_REG) ++ vfmadb %v28,%v2,%v1,%v28 ++ vfmadb %v29,%v3,%v1,%v29 ++ la \PTR_B_REG, 32(\PTR_B_REG) ++.endm ++ ++.macro CALC_4x4_4 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vlrepg %v1,8(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vlrepg %v7,16(\PTR_B_REG) ++ vlrepg %v1,24(\PTR_B_REG) ++ vfmadb %v24,%v2,%v7,%v24 ++ vfmadb %v25,%v3,%v7,%v25 ++ vfmadb %v28,%v2,%v1,%v28 ++ vfmadb %v29,%v3,%v1,%v29 ++ ++ vlrepg %v7, 32(\PTR_B_REG) ++ vlrepg %v1,40(\PTR_B_REG) ++ vl %v2, 32(\PTR_A_REG) ++ vl %v3, 48(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vlrepg %v7,48(\PTR_B_REG) ++ vlrepg %v1,56(\PTR_B_REG) ++ vfmadb %v24,%v2,%v7,%v24 ++ vfmadb %v25,%v3,%v7,%v25 ++ vfmadb %v28,%v2,%v1,%v28 ++ vfmadb %v29,%v3,%v1,%v29 ++ ++ vlrepg %v7, 64(\PTR_B_REG) ++ vlrepg %v1,72(\PTR_B_REG) ++ vl %v2, 64(\PTR_A_REG) ++ vl %v3, 80(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vlrepg %v7,80(\PTR_B_REG) ++ vlrepg %v1,88(\PTR_B_REG) ++ vfmadb %v24,%v2,%v7,%v24 ++ vfmadb %v25,%v3,%v7,%v25 ++ vfmadb %v28,%v2,%v1,%v28 ++ vfmadb %v29,%v3,%v1,%v29 ++ ++ vlrepg %v7, 96(\PTR_B_REG) ++ vlrepg %v1,104(\PTR_B_REG) ++ vl %v2, 96(\PTR_A_REG) ++ vl %v3, 112(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vlrepg %v7,112(\PTR_B_REG) ++ la \PTR_A_REG, 128(\PTR_A_REG) ++ vlrepg %v1,120(\PTR_B_REG) ++ vfmadb %v24,%v2,%v7,%v24 ++ vfmadb %v25,%v3,%v7,%v25 ++ vfmadb %v28,%v2,%v1,%v28 ++ la \PTR_B_REG, 128(\PTR_B_REG) ++ vfmadb %v29,%v3,%v1,%v29 ++.endm ++ ++/*STORE C4X4*/ ++.macro STORE_4x4 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ ++ 
/*add LDC_BYTE_reg=LDC_BYTE_original<<1 */ ++ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) ++ vl %v1,0(\CIJ_REG) ++ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG) ++ ++ vl %v2,16(\CIJ_REG) ++ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG) ++ ++ ++ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) ++ ++ /*add c LDC_BYTE*/ ++ vl %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v1,%v20,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ vl %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v2,%v21,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ vl %v1,0(\CIJ_REG,LOCAL_VAR1) ++ vfmadb %v1,%v24,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG,LOCAL_VAR1) ++ ++ vl %v2,16(\CIJ_REG,LOCAL_VAR1) ++ vfmadb %v2,%v25,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG,LOCAL_VAR1) ++ ++ ++ vl %v1,0(\CIJ_REG,LOCAL_VAR2) ++ vfmadb %v1,%v28,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG,LOCAL_VAR2) ++ ++ vl %v2,16(\CIJ_REG,LOCAL_VAR2) ++ vfmadb %v2,%v29,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG,LOCAL_VAR2) ++ ++ la \CIJ_REG,32(\CIJ_REG) ++.endm ++ ++/*STORE TRMM C4X4*/ ++.macro STORE_TRMM_4x4 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ /*add LDC_BYTE_reg=LDC_BYTE_original<<1 */ ++ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) ++ vfmdb %v1,%v16,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG) ++ vfmdb %v2,%v17,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG) ++ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) ++ vfmdb %v1,%v20,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmdb %v2,%v21,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmdb %v1,%v24,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG,LOCAL_VAR1) ++ vfmdb %v2,%v25,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG,LOCAL_VAR1) ++ vfmdb %v1,%v28,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG,LOCAL_VAR2) ++ vfmdb %v2,%v29,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG,LOCAL_VAR2) ++ la \CIJ_REG,32(\CIJ_REG) ++.endm ++/**************************************Kernel2x4*************************************************/ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_2x4 ++ vzero %v1 /*a1b1 a1b2 */ ++ vzero %v2 /*a1b3 a1b4 */ ++ vzero %v6 /*a2b1 a2b2 */ ++ vzero %v7 /*a2b3 a2b4 */ ++.endm ++ ++/*Calculate for 2x4_4 C blocks.This Time BroadCast A. but Load B multiple*/ ++.macro CALC_2x4_4 PTR_A_REG,PTR_B_REG ++ vl %v4, 0(\PTR_B_REG) ++ vl %v5,16(\PTR_B_REG) ++ vlrepg %v3, 0(\PTR_A_REG) ++ vlrepg %v16, 8(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ vfmadb %v2,%v3,%v5,%v2 ++ vfmadb %v6,%v16,%v4,%v6 ++ vfmadb %v7,%v16,%v5,%v7 ++ ++ vl %v4, 32(\PTR_B_REG) ++ vl %v5,48(\PTR_B_REG) ++ vlrepg %v3, 16(\PTR_A_REG) ++ vlrepg %v16, 24(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ vfmadb %v2,%v3,%v5,%v2 ++ vfmadb %v6,%v16,%v4,%v6 ++ vfmadb %v7,%v16,%v5,%v7 ++ ++ vl %v4, 64(\PTR_B_REG) ++ vl %v5,80(\PTR_B_REG) ++ vlrepg %v3, 32(\PTR_A_REG) ++ vlrepg %v16, 40(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ vfmadb %v2,%v3,%v5,%v2 ++ vfmadb %v6,%v16,%v4,%v6 ++ vfmadb %v7,%v16,%v5,%v7 ++ ++ vl %v4, 96(\PTR_B_REG) ++ vl %v5,112(\PTR_B_REG) ++ vlrepg %v3, 48(\PTR_A_REG) ++ vlrepg %v16, 56(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ vfmadb %v2,%v3,%v5,%v2 ++ la \PTR_B_REG, 128(\PTR_B_REG) ++ vfmadb %v6,%v16,%v4,%v6 ++ vfmadb %v7,%v16,%v5,%v7 ++ la \PTR_A_REG, 64(\PTR_A_REG) ++.endm ++ ++/*Calculate for 2x4 C blocks.This Time BroadCast A. 
but Load B multiple*/ ++.macro CALC_2x4 PTR_A_REG,PTR_B_REG ++ vl %v4, 0(\PTR_B_REG) ++ vl %v5,16(\PTR_B_REG) ++ vlrepg %v3, 0(\PTR_A_REG) ++ vlrepg %v16, 8(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ vfmadb %v2,%v3,%v5,%v2 ++ la \PTR_A_REG, 16(\PTR_A_REG) ++ vfmadb %v6,%v16,%v4,%v6 ++ vfmadb %v7,%v16,%v5,%v7 ++ la \PTR_B_REG, 32(\PTR_B_REG) ++.endm ++ ++.macro STORE_2x4 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL ++/**/ ++ vfmdb %v1,%v1,\ALPHA_REG ++ vfmdb %v2,%v2,\ALPHA_REG ++ vfmdb %v6,%v6,\ALPHA_REG ++ vfmdb %v7,%v7,\ALPHA_REG ++ vrepg %v4,%v1,1 ++ vrepg %v5,%v6,1 ++ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) ++ adb %f1, 0(\CIJ_REG) ++ std %f1,0(\CIJ_REG) ++ ++ adb %f6, 8(\CIJ_REG) ++ std %f6,8(\CIJ_REG) ++ ++ adb %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ adb %f5,8(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ std %f5,8(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ /*add LDC_BYTE */ ++ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) ++ vrepg %v4,%v2,1 ++ vrepg %v5,%v7,1 ++ ++ adb %f2,0(\CIJ_REG,LOCAL_VAR1) ++ std %f2,0(\CIJ_REG,LOCAL_VAR1) ++ ++ adb %f7,8(\CIJ_REG,LOCAL_VAR1) ++ std %f7,8(\CIJ_REG,LOCAL_VAR1) ++ ++ adb %f4,0(\CIJ_REG,LOCAL_VAR2) ++ std %f4,0(\CIJ_REG,LOCAL_VAR2) ++ ++ adb %f5,8(\CIJ_REG,LOCAL_VAR2) ++ std %f5,8(\CIJ_REG,LOCAL_VAR2) ++ la \CIJ_REG,16(\CIJ_REG) ++ ++.endm ++ ++.macro STORE_TRMM_2x4 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL ++/**/ ++ vfmdb %v1,%v1,\ALPHA_REG ++ vfmdb %v2,%v2,\ALPHA_REG ++ vfmdb %v6,%v6,\ALPHA_REG ++ vfmdb %v7,%v7,\ALPHA_REG ++ vrepg %v4,%v1,1 ++ vrepg %v5,%v6,1 ++ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) ++ std %f1,0(\CIJ_REG) ++ std %f6,8(\CIJ_REG) ++ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ std %f5,8(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ /*add LDC_BYTE */ ++ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) ++ vrepg %v4,%v2,1 ++ vrepg %v5,%v7,1 ++ std %f2,0(\CIJ_REG,LOCAL_VAR1) ++ std %f7,8(\CIJ_REG,LOCAL_VAR1) ++ std %f4,0(\CIJ_REG,LOCAL_VAR2) ++ std %f5,8(\CIJ_REG,LOCAL_VAR2) ++ la \CIJ_REG,16(\CIJ_REG) ++.endm ++ ++/**************************************Kernel1x4*************************************************/ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_1x4 ++ vzero %v1 ++ vzero %v2 ++.endm ++/*Calculate for 1x4 C blocks.This Time BroadCast A. but Load B multiple*/ ++.macro CALC_1x4 PTR_A_REG,PTR_B_REG ++ vl %v4, 0(\PTR_B_REG) ++ vl %v5,16(\PTR_B_REG) ++ vlrepg %v3, 0(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ la \PTR_A_REG, 8(\PTR_A_REG) ++ vfmadb %v2,%v3,%v5,%v2 ++ la \PTR_B_REG, 32(\PTR_B_REG) ++.endm ++ ++/*Calculate for 1x4_4 C blocks.This Time BroadCast A. 
but Load B multiple*/ ++.macro CALC_1x4_4 PTR_A_REG,PTR_B_REG ++ vl %v4, 0(\PTR_B_REG) ++ vl %v5,16(\PTR_B_REG) ++ vlrepg %v3, 0(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ vfmadb %v2,%v3,%v5,%v2 ++ ++ vl %v4, 32(\PTR_B_REG) ++ vl %v5,48(\PTR_B_REG) ++ vlrepg %v3, 8(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ vfmadb %v2,%v3,%v5,%v2 ++ ++ vl %v4, 64(\PTR_B_REG) ++ vl %v5,80(\PTR_B_REG) ++ vlrepg %v3, 16(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ vfmadb %v2,%v3,%v5,%v2 ++ ++ vl %v4, 96(\PTR_B_REG) ++ vl %v5,112(\PTR_B_REG) ++ vlrepg %v3, 24(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ vfmadb %v2,%v3,%v5,%v2 ++ la \PTR_A_REG, 32(\PTR_A_REG) ++ la \PTR_B_REG, 128(\PTR_B_REG) ++.endm ++ ++.macro STORE_1x4 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL ++/**/ ++ vfmdb %v1,%v1,\ALPHA_REG ++ vfmdb %v2,%v2,\ALPHA_REG ++ vrepg %v4,%v1,1 ++ vrepg %v5,%v2,1 ++ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) ++ adb %f1, 0(\CIJ_REG) ++ std %f1,0(\CIJ_REG) ++ ++ adb %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ /*add LDC_BYTE */ ++ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) ++ adb %f2,0(\CIJ_REG,LOCAL_VAR1) ++ std %f2,0(\CIJ_REG,LOCAL_VAR1) ++ adb %f5,0(\CIJ_REG,LOCAL_VAR2) ++ std %f5,0(\CIJ_REG,LOCAL_VAR2) ++ la \CIJ_REG,8(\CIJ_REG) ++ ++.endm ++ ++.macro STORE_TRMM_1x4 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL ++/**/ ++ vfmdb %v1,%v1,\ALPHA_REG ++ vfmdb %v2,%v2,\ALPHA_REG ++ vrepg %v4,%v1,1 ++ vrepg %v5,%v2,1 ++ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) ++ std %f1,0(\CIJ_REG) ++ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ /*add LDC_BYTE */ ++ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) ++ std %f2,0(\CIJ_REG,LOCAL_VAR1) ++ std %f5,0(\CIJ_REG,LOCAL_VAR2) ++ la \CIJ_REG,8(\CIJ_REG) ++.endm ++/***************************************BN=2 SECTION***************************************/ ++/*************************************Kernel8x2***************************************************/ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_8x2 ++ vzero %v16 ++ vzero %v17 ++ vzero %v18 ++ vzero %v19 ++ vzero %v20 ++ vzero %v21 ++ vzero %v22 ++ vzero %v23 ++ ++.endm ++ ++/*Calculate for 8x2 C blocks*/ ++.macro CALC_8x2 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vlrepg %v1,8(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ vl %v4, 32(\PTR_A_REG) ++ vl %v5, 48(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ la \PTR_A_REG, 64(\PTR_A_REG) ++ vfmadb %v22,%v4,%v1,%v22 ++ vfmadb %v23,%v5,%v1,%v23 ++ la \PTR_B_REG, 16(\PTR_B_REG) ++.endm ++ ++ ++/*Calculate for 8x2_4 C blocks*/ ++.macro CALC_8x2_4 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vlrepg %v1,8(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ vl %v4, 32(\PTR_A_REG) ++ vl %v5, 48(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vfmadb %v22,%v4,%v1,%v22 ++ vfmadb %v23,%v5,%v1,%v23 ++ ++ vlrepg %v7, 16(\PTR_B_REG) ++ vlrepg %v1,24(\PTR_B_REG) ++ vl %v2, 64(\PTR_A_REG) ++ vl %v3, 80(\PTR_A_REG) ++ vl %v4, 96(\PTR_A_REG) ++ vl %v5, 112(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vfmadb %v22,%v4,%v1,%v22 ++ vfmadb %v23,%v5,%v1,%v23 ++ ++ vlrepg %v7, 32(\PTR_B_REG) ++ vlrepg 
%v1,40(\PTR_B_REG) ++ vl %v2, 128(\PTR_A_REG) ++ vl %v3, 144(\PTR_A_REG) ++ vl %v4, 160(\PTR_A_REG) ++ vl %v5, 176(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vfmadb %v22,%v4,%v1,%v22 ++ vfmadb %v23,%v5,%v1,%v23 ++ ++ vlrepg %v7, 48(\PTR_B_REG) ++ vlrepg %v1,56(\PTR_B_REG) ++ vl %v2, 192(\PTR_A_REG) ++ vl %v3, 208(\PTR_A_REG) ++ vl %v4, 224(\PTR_A_REG) ++ vl %v5, 240(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ la \PTR_B_REG, 64(\PTR_B_REG) ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ vfmadb %v22,%v4,%v1,%v22 ++ vfmadb %v23,%v5,%v1,%v23 ++ la \PTR_A_REG, 256(\PTR_A_REG) ++.endm ++ ++/*STORE C8X2*/ ++.macro STORE_8x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ ++ vl %v1,0(\CIJ_REG) ++ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG) ++ ++ vl %v2,16(\CIJ_REG) ++ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG) ++ ++ vl %v3,32(\CIJ_REG) ++ vfmadb %v3,%v18,\ALPHA_VECREG,%v3 ++ vst %v3,32(\CIJ_REG) ++ ++ vl %v4,48(\CIJ_REG) ++ vfmadb %v4,%v19,\ALPHA_VECREG,%v4 ++ vst %v4,48(\CIJ_REG) ++ ++ ++ vl %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v1,%v20,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ vl %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v2,%v21,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ ++ vl %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v3,%v22,\ALPHA_VECREG,%v3 ++ vst %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ vl %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v4,%v23,\ALPHA_VECREG,%v4 ++ vst %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ ++ la \CIJ_REG,64(\CIJ_REG) ++ ++.endm ++ ++/*STORE TRMM C8X2*/ ++.macro STORE_TRMM_8x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ vfmdb %v1,%v16,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG) ++ vfmdb %v2,%v17,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG) ++ vfmdb %v3,%v18,\ALPHA_VECREG ++ vst %v3,32(\CIJ_REG) ++ vfmdb %v4,%v19,\ALPHA_VECREG ++ vst %v4,48(\CIJ_REG) ++ vfmdb %v1,%v20,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmdb %v2,%v21,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmdb %v3,%v22,\ALPHA_VECREG ++ vst %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmdb %v4,%v23,\ALPHA_VECREG ++ vst %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ la \CIJ_REG,64(\CIJ_REG) ++.endm ++ ++/*************************************Kernel4x2***************************************************/ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_4x2 ++ vzero %v16 ++ vzero %v17 ++ vzero %v20 ++ vzero %v21 ++ ++.endm ++ ++/*Calculate for 4x2 C blocks*/ ++.macro CALC_4x2 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vlrepg %v1,8(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ la \PTR_A_REG, 32(\PTR_A_REG) ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ la \PTR_B_REG, 16(\PTR_B_REG) ++.endm ++ ++/*Calculate for 4x2_4 C blocks*/ ++.macro CALC_4x2_4 PTR_A_REG,PTR_B_REG ++ ++ vlrepg %v7, 0(\PTR_B_REG) ++ vlrepg %v1,8(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ ++ vlrepg %v7, 16(\PTR_B_REG) ++ vlrepg %v1,24(\PTR_B_REG) ++ vl %v2, 32(\PTR_A_REG) ++ vl %v3, 48(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb 
%v21,%v3,%v1,%v21 ++ ++ vlrepg %v7, 32(\PTR_B_REG) ++ vlrepg %v1,40(\PTR_B_REG) ++ vl %v2, 64(\PTR_A_REG) ++ vl %v3, 80(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ ++ ++ vlrepg %v7, 48(\PTR_B_REG) ++ vlrepg %v1,56(\PTR_B_REG) ++ vl %v2, 96(\PTR_A_REG) ++ vl %v3, 112(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ la \PTR_B_REG, 64(\PTR_B_REG) ++ vfmadb %v20,%v2,%v1,%v20 ++ vfmadb %v21,%v3,%v1,%v21 ++ la \PTR_A_REG, 128(\PTR_A_REG) ++.endm ++ ++ ++/*STORE C4x2*/ ++.macro STORE_4x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ ++ vl %v1,0(\CIJ_REG) ++ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG) ++ ++ vl %v2,16(\CIJ_REG) ++ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG) ++ ++ ++ vl %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v1,%v20,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ vl %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v2,%v21,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ la \CIJ_REG,32(\CIJ_REG) ++ ++.endm ++ ++/*STORE TRMM C4x2*/ ++.macro STORE_TRMM_4x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ vfmdb %v1,%v16,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG) ++ vfmdb %v2,%v17,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG) ++ vfmdb %v1,%v20,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmdb %v2,%v21,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ la \CIJ_REG,32(\CIJ_REG) ++.endm ++ ++/*************************************Kernel2x2***************************************************/ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_2x2 ++ vzero %v16 ++ vzero %v20 ++ ++.endm ++ ++/*Calculate for 2x2 C blocks*/ ++.macro CALC_2x2 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vlrepg %v1,8(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ la \PTR_A_REG, 16(\PTR_A_REG) ++ vfmadb %v20,%v2,%v1,%v20 ++ la \PTR_B_REG, 16(\PTR_B_REG) ++.endm ++ ++/*Calculate for 2x2_4 C blocks*/ ++.macro CALC_2x2_4 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vlrepg %v1,8(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v20,%v2,%v1,%v20 ++ ++ vlrepg %v7, 16(\PTR_B_REG) ++ vlrepg %v1,24(\PTR_B_REG) ++ vl %v2, 16(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v20,%v2,%v1,%v20 ++ ++ vlrepg %v7, 32(\PTR_B_REG) ++ vlrepg %v1,40(\PTR_B_REG) ++ vl %v2, 32(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v20,%v2,%v1,%v20 ++ ++ ++ vlrepg %v7, 48(\PTR_B_REG) ++ vlrepg %v1,56(\PTR_B_REG) ++ vl %v2, 48(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v20,%v2,%v1,%v20 ++ ++ la \PTR_B_REG, 64(\PTR_B_REG) ++ la \PTR_A_REG, 64(\PTR_A_REG) ++.endm ++ ++/*STORE C2x2*/ ++.macro STORE_2x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ ++ vl %v1,0(\CIJ_REG) ++ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG) ++ ++ vl %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ vfmadb %v1,%v20,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ la \CIJ_REG,16(\CIJ_REG) ++ ++.endm ++ ++/*STORE TRMM C2x2*/ ++.macro STORE_TRMM_2x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ vfmdb %v1,%v16,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG) ++ vfmdb %v1,%v20,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ la \CIJ_REG,16(\CIJ_REG) ++.endm ++ ++/**************************************Kernel1x2*************************************************/ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_1x2 ++ vzero %v1 ++.endm ++/*Calculate for 1x2 C blocks.This Time BroadCast A. 
but Load B multiple*/ ++.macro CALC_1x2 PTR_A_REG,PTR_B_REG ++ vl %v4, 0(\PTR_B_REG) ++ vlrepg %v3, 0(\PTR_A_REG) ++ la \PTR_B_REG, 16(\PTR_B_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ la \PTR_A_REG, 8(\PTR_A_REG) ++.endm ++ ++.macro CALC_1x2_4 PTR_A_REG,PTR_B_REG ++ vl %v4, 0(\PTR_B_REG) ++ vlrepg %v3, 0(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ ++ vl %v4, 16(\PTR_B_REG) ++ vlrepg %v3, 8(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ ++ vl %v4, 32(\PTR_B_REG) ++ vlrepg %v3, 16(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ ++ vl %v4, 48(\PTR_B_REG) ++ vlrepg %v3, 24(\PTR_A_REG) ++ vfmadb %v1,%v3,%v4,%v1 ++ ++ la \PTR_B_REG, 64(\PTR_B_REG) ++ la \PTR_A_REG, 32(\PTR_A_REG) ++.endm ++ ++.macro STORE_1x2 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL ++/**/ ++ vfmdb %v1,%v1,\ALPHA_REG ++ vrepg %v4,%v1,1 ++ adb %f1, 0(\CIJ_REG) ++ std %f1,0(\CIJ_REG) ++ ++ adb %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ ++ la \CIJ_REG,8(\CIJ_REG) ++ ++.endm ++ ++.macro STORE_TRMM_1x2 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL ++/**/ ++ vfmdb %v1,%v1,\ALPHA_REG ++ vrepg %v4,%v1,1 ++ std %f1,0(\CIJ_REG) ++ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) ++ la \CIJ_REG,8(\CIJ_REG) ++.endm ++ ++/**************************************BN=1*******************************************************/ ++/*************************************Kernel8x1***************************************************/ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_8x1 ++ vzero %v16 ++ vzero %v17 ++ vzero %v18 ++ vzero %v19 ++.endm ++/*Calculate for 8x1 C blocks*/ ++.macro CALC_8x1 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ vl %v4, 32(\PTR_A_REG) ++ vl %v5, 48(\PTR_A_REG) ++ la \PTR_B_REG, 8(\PTR_B_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ la \PTR_A_REG, 64(\PTR_A_REG) ++ vfmadb %v19,%v5,%v7,%v19 ++.endm ++ ++/*Calculate for 8x1_4 C blocks*/ ++.macro CALC_8x1_4 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ vl %v4, 32(\PTR_A_REG) ++ vl %v5, 48(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ ++ vlrepg %v7, 8(\PTR_B_REG) ++ vl %v2, 64(\PTR_A_REG) ++ vl %v3, 80(\PTR_A_REG) ++ vl %v4, 96(\PTR_A_REG) ++ vl %v5, 112(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ ++ vlrepg %v7, 16(\PTR_B_REG) ++ vl %v2, 128(\PTR_A_REG) ++ vl %v3, 144(\PTR_A_REG) ++ vl %v4, 160(\PTR_A_REG) ++ vl %v5, 176(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ ++ vlrepg %v7, 24(\PTR_B_REG) ++ vl %v2, 192(\PTR_A_REG) ++ vl %v3, 208(\PTR_A_REG) ++ vl %v4, 224(\PTR_A_REG) ++ vl %v5, 240(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ vfmadb %v18,%v4,%v7,%v18 ++ vfmadb %v19,%v5,%v7,%v19 ++ ++ ++ la \PTR_A_REG, 256(\PTR_A_REG) ++ la \PTR_B_REG, 32(\PTR_B_REG) ++.endm ++ ++/*STORE C8X1*/ ++.macro STORE_8x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ ++ vl %v1,0(\CIJ_REG) ++ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG) ++ ++ vl %v2,16(\CIJ_REG) ++ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG) ++ ++ vl %v3,32(\CIJ_REG) ++ vfmadb %v3,%v18,\ALPHA_VECREG,%v3 ++ vst %v3,32(\CIJ_REG) ++ ++ vl %v4,48(\CIJ_REG) ++ vfmadb %v4,%v19,\ALPHA_VECREG,%v4 ++ vst %v4,48(\CIJ_REG) ++ ++ la \CIJ_REG,64(\CIJ_REG) ++ ++.endm ++ ++/*STORE TRMM C8X1*/ ++.macro 
STORE_TRMM_8x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ vfmdb %v1,%v16,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG) ++ vfmdb %v2,%v17,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG) ++ vfmdb %v3,%v18,\ALPHA_VECREG ++ vst %v3,32(\CIJ_REG) ++ vfmdb %v4,%v19,\ALPHA_VECREG ++ vst %v4,48(\CIJ_REG) ++ la \CIJ_REG,64(\CIJ_REG) ++.endm ++ ++ ++/*************************************Kernel4x1***************************************************/ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_4x1 ++ vzero %v16 ++ vzero %v17 ++.endm ++/*Calculate for 4x1 C blocks*/ ++.macro CALC_4x1 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ la \PTR_B_REG, 8(\PTR_B_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ la \PTR_A_REG, 32(\PTR_A_REG) ++.endm ++ ++/*Calculate for 4x1_4 C blocks*/ ++.macro CALC_4x1_4 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vl %v3, 16(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ ++ vlrepg %v7, 8(\PTR_B_REG) ++ vl %v2, 32(\PTR_A_REG) ++ vl %v3, 48(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ ++ vlrepg %v7, 16(\PTR_B_REG) ++ vl %v2, 64(\PTR_A_REG) ++ vl %v3, 80(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ ++ vlrepg %v7, 24(\PTR_B_REG) ++ vl %v2, 96(\PTR_A_REG) ++ vl %v3, 112(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ vfmadb %v17,%v3,%v7,%v17 ++ ++ la \PTR_B_REG, 32(\PTR_B_REG) ++ la \PTR_A_REG, 128(\PTR_A_REG) ++.endm ++ ++/*STORE C4X1*/ ++.macro STORE_4x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ ++ vl %v1,0(\CIJ_REG) ++ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG) ++ ++ vl %v2,16(\CIJ_REG) ++ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 ++ vst %v2,16(\CIJ_REG) ++ ++ ++ la \CIJ_REG,32(\CIJ_REG) ++ ++.endm ++ ++/*STORE TRMM C4X1*/ ++.macro STORE_TRMM_4x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ vfmdb %v1,%v16,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG) ++ vfmdb %v2,%v17,\ALPHA_VECREG ++ vst %v2,16(\CIJ_REG) ++ la \CIJ_REG,32(\CIJ_REG) ++.endm ++/*************************************Kernel2x1***************************************************/ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_2x1 ++ vzero %v16 ++.endm ++/*Calculate for 2x1 C blocks*/ ++.macro CALC_2x1 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ la \PTR_B_REG, 8(\PTR_B_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ la \PTR_A_REG, 16(\PTR_A_REG) ++.endm ++ ++/*Calculate for 2x1_4 C blocks*/ ++.macro CALC_2x1_4 PTR_A_REG,PTR_B_REG ++ vlrepg %v7, 0(\PTR_B_REG) ++ vl %v2, 0(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ ++ vlrepg %v7, 8(\PTR_B_REG) ++ vl %v2, 16(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ ++ vlrepg %v7, 16(\PTR_B_REG) ++ vl %v2, 32(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ ++ vlrepg %v7, 24(\PTR_B_REG) ++ vl %v2, 48(\PTR_A_REG) ++ vfmadb %v16,%v2,%v7,%v16 ++ ++ la \PTR_B_REG, 32(\PTR_B_REG) ++ la \PTR_A_REG, 64(\PTR_A_REG) ++.endm ++ ++/*STORE C2X1*/ ++.macro STORE_2x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ ++ vl %v1,0(\CIJ_REG) ++ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 ++ vst %v1,0(\CIJ_REG) ++ ++ la \CIJ_REG,16(\CIJ_REG) ++ ++.endm ++ ++/*STORE TRMM C2X1*/ ++.macro STORE_TRMM_2x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL ++ vfmdb %v1,%v16,\ALPHA_VECREG ++ vst %v1,0(\CIJ_REG) ++ la \CIJ_REG,16(\CIJ_REG) ++.endm ++/*************************************Kernel1x1***************************************************/ ++/*Zero C block Vectors*/ ++.macro ZERO_CVEC_1x1 ++ LZDR %f1 ++.endm ++/*Calculate for 1x1 C blocks*/ ++.macro CALC_1x1 
PTR_A_REG,PTR_B_REG ++ ld %f2,0(\PTR_A_REG) /**a*/ ++ la \PTR_A_REG,8(\PTR_A_REG) ++ madb %f1,%f2,0(\PTR_B_REG) ++ la \PTR_B_REG,8(\PTR_B_REG) ++.endm ++ ++/*Calculate for 1x1_4 C blocks*/ ++.macro CALC_1x1_4 PTR_A_REG,PTR_B_REG ++ ld %f2,0(\PTR_A_REG) /**a*/ ++ madb %f1,%f2,0(\PTR_B_REG) ++ ++ ld %f2,8(\PTR_A_REG) /**a*/ ++ madb %f1,%f2,8(\PTR_B_REG) ++ ++ ld %f2,16(\PTR_A_REG) /**a*/ ++ madb %f1,%f2,16(\PTR_B_REG) ++ ++ ld %f2,24(\PTR_A_REG) /**a*/ ++ madb %f1,%f2,24(\PTR_B_REG) ++ ++ la \PTR_A_REG,32(\PTR_A_REG) ++ la \PTR_B_REG,32(\PTR_B_REG) ++.endm ++ ++/*STORE C1X1*/ ++.macro STORE_1x1 ALPHA_FLOAT,CIJ_REG,LDC_BYTE_ORIGINAL ++ ld %f2,0(CIJ_LOCAL) ++ madbr %f2,%f1,\ALPHA_FLOAT ++ std %f2,0(CIJ_LOCAL) ++ la \CIJ_REG,8(\CIJ_REG) ++.endm ++ ++/*STORE C1X1*/ ++.macro STORE_TRMM_1x1 ALPHA_FLOAT,CIJ_REG,LDC_BYTE_ORIGINAL ++ mdbr %f1,\ALPHA_FLOAT ++ std %f1,0(CIJ_LOCAL) ++ la \CIJ_REG,8(\CIJ_REG) ++.endm ++ ++ ++/****************************TRMM POINTER REFRESH MACROSES*************************/ ++ ++.macro RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B ++ #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) ++ /* ptrbb = bb;*/ ++ lgr \PTR_B,\B_VAL /*refresh BPOINT*/ ++ ++ #else ++ /* ptrba =ptrba+ off*C_A; ++ ptrbb = bb + off*C_B;*/ ++.if \C_B==4 ++ .if \C_A==8 ++ sllg \PTR_B, \OFF_VAL,5 ++ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*4*/ ++ agr \PTR_A,\PTR_B /*ptrba+off*4**/ ++ la \PTR_B,0(\B_VAL,\PTR_B) ++ .elseif \C_A==4 ++ sllg \PTR_B, \OFF_VAL,5 ++ agr \PTR_A,\PTR_B /*ptrba+off*4**/ ++ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ ++ .elseif \C_A==2 ++ sllg \PTR_B, \OFF_VAL,4 ++ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*2**/ ++ agr \PTR_B, \PTR_B ++ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ ++ ++ .elseif \C_A==1 ++ sllg \PTR_B, \OFF_VAL,3 ++ agr \PTR_A,\PTR_B /*ptrba+off*4**/ ++ sllg \PTR_B, \OFF_VAL,5 ++ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ ++ .endif ++ ++.elseif \C_B==2 ++ .if \C_A==8 ++ sllg \PTR_B, \OFF_VAL,6 ++ agr \PTR_A,\PTR_B /*ptrba+off*8**/ ++ sllg \PTR_B, \OFF_VAL,4 ++ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ ++ .elseif \C_A==4 ++ sllg \PTR_B, \OFF_VAL,4 ++ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*2**/ ++ agr \PTR_A,\PTR_B /*ptrba+off*2**/ ++ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ ++ .elseif \C_A==2 ++ sllg \PTR_B, \OFF_VAL,4 ++ agr \PTR_A,\PTR_B /*ptrba+off*2**/ ++ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ ++ .elseif \C_A==1 ++ sllg \PTR_B, \OFF_VAL,3 ++ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*1**/ ++ agr \PTR_B,\PTR_B /* off+off**/ ++ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ ++ .endif ++ ++.elseif \C_B==1 ++ .if \C_A==8 ++ sllg \PTR_B, \OFF_VAL,6 ++ agr \PTR_A,\PTR_B /*ptrba+off*8**/ ++ sllg \PTR_B, \OFF_VAL,3 ++ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ ++ .elseif \C_A==4 ++ sllg \PTR_B, \OFF_VAL,5 ++ agr \PTR_A,\PTR_B /*ptrba+off*4**/ ++ sllg \PTR_B, \OFF_VAL,3 ++ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ ++ .elseif \C_A==2 ++ sllg \PTR_B, \OFF_VAL,3 ++ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*1**/ ++ agr \PTR_A,\PTR_B /*ptrba+off*1**/ ++ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ ++ ++ .elseif \C_A==1 ++ sllg \PTR_B, \OFF_VAL,3 ++ agr \PTR_A,\PTR_B /*ptrba+off*1**/ ++ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ ++ .endif ++.endif ++ ++ ++ #endif ++.endm ++ ++/**/ ++.macro RefreshTempBk TEMP_VAL,BK_VAL,OFF_VAL,INCR_A,INCR_B ++ #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) ++ /* temp = bk-off;*/ ++ sgrk \TEMP_VAL,\BK_VAL,\OFF_VAL ++ ++ #elif defined(LEFT) ++ /* temp = 
off+INCR_A; // number of values in A */ ++ la \TEMP_VAL,\INCR_A(\OFF_VAL) ++ #else ++ /* temp = off+INCR_B // number of values in B*/ ++ la \TEMP_VAL,\INCR_B(\OFF_VAL) ++ #endif ++ ++.endm ++ ++ ++.macro RefreshPointersAndOFF TEMP_VAL,BK_VAL,OFF_VAL,PTR_B,PTR_A,C_A,C_B ++ ++ #if ( defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) ++ /*temp = bk - off;*/ ++ sgrk \TEMP_VAL,\BK_VAL,\OFF_VAL ++ #ifdef LEFT ++ /*temp -= 8; // number of values in A*/ ++ lay \TEMP_VAL,-\C_A(\TEMP_VAL) ++ #else ++ /*temp -= 4; // number of values in B*/ ++ lay \TEMP_VAL,-\C_B(\TEMP_VAL) ++ #endif ++ /*ptrba += temp*C_A; ++ ptrbb += temp*C_B;*/ ++ .if \C_B==4 ++ .if \C_A==8 ++ sllg \TEMP_VAL, \TEMP_VAL,5 /*temp*4*/ ++ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ ++ agr \PTR_A, \TEMP_VAL /*ptrba+temp*C_A*/ ++ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ ++ .elseif \C_A==4 ++ sllg \TEMP_VAL, \TEMP_VAL,5 /*temp*4*/ ++ agr \PTR_B, \TEMP_VAL /*ptrbb+temp*C_B*/ ++ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ ++ .elseif \C_A==2 ++ sllg \TEMP_VAL, \TEMP_VAL,4 /*temp*2*/ ++ agr \PTR_B, \TEMP_VAL /*ptrbb+temp*C_B*/ ++ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ ++ agr \PTR_B, \TEMP_VAL /*ptrbb+temp*C_B*/ ++ .elseif \C_A==1 ++ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ ++ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ ++ sllg \TEMP_VAL, \TEMP_VAL,2 /*temp*2*2*/ ++ agr \PTR_B, \TEMP_VAL /*ptrbb+temp*C_B*/ ++ .endif ++ .elseif \C_B==2 ++ .if \C_A==8 ++ sllg \TEMP_VAL, \TEMP_VAL,4 /*temp*2*/ ++ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ ++ sllg \TEMP_VAL, \TEMP_VAL,2 /*temp*2*4 */ ++ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ ++ .elseif \C_A==4 ++ sllg \TEMP_VAL, \TEMP_VAL,4 /*temp*2*/ ++ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ ++ agr \TEMP_VAL, \TEMP_VAL ++ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ ++ .elseif \C_A==2 ++ sllg \TEMP_VAL, \TEMP_VAL,4 /*temp*2*/ ++ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ ++ agr \PTR_A, \TEMP_VAL /*ptrba+temp*C_A*/ ++ .elseif \C_A==1 ++ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ ++ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ ++ agr \PTR_A, \TEMP_VAL /*ptrba+temp*C_A*/ ++ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ ++ .endif ++ .elseif \C_B==1 ++ .if \C_A==8 ++ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ ++ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ ++ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*8 */ ++ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ ++ .elseif \C_A==4 ++ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ ++ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ ++ sllg \TEMP_VAL, \TEMP_VAL,2 /*temp*1*4 */ ++ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ ++ .elseif \C_A==2 ++ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ ++ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ ++ agr \TEMP_VAL, \TEMP_VAL ++ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ ++ .elseif \C_A==1 ++ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ ++ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ ++ agr \PTR_A, \TEMP_VAL /*ptrba+temp*C_A*/ ++ .endif ++ .endif ++ #endif ++ ++ #ifdef LEFT ++ /*off += 8; // number of values in A*/ ++ aghi \OFF_VAL,\C_A ++ #endif ++.endm +\ No newline at end of file +diff --git a/kernel/zarch/trmm8x4V.S b/kernel/zarch/trmm8x4V.S +new file mode 100644 +index 00000000..8e6a03c1 +--- /dev/null ++++ b/kernel/zarch/trmm8x4V.S +@@ -0,0 +1,877 @@ ++/*************************************************************************** ++Copyright (c) 2013-2017, The OpenBLAS Project ++All rights reserved. 
++Redistribution and use in source and binary forms, with or without ++modification, are permitted provided that the following conditions are ++met: ++1. Redistributions of source code must retain the above copyright ++notice, this list of conditions and the following disclaimer. ++2. Redistributions in binary form must reproduce the above copyright ++notice, this list of conditions and the following disclaimer in ++the documentation and/or other materials provided with the ++distribution. ++3. Neither the name of the OpenBLAS project nor the names of ++its contributors may be used to endorse or promote products ++derived from this software without specific prior written permission. ++THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE ++LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ++DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ++OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE ++USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++*****************************************************************************/ ++ ++/************************************************************************************** ++* 2017/01/01 AbdelRauf (quickwritereader@gmail.com) ++* BLASTEST : OK ++* CTEST : OK ++* TEST : OK ++**************************************************************************************/ ++ ++/*********************************************************************/ ++/* Copyright 2009, 2010 The University of Texas at Austin. */ ++/* All rights reserved. */ ++/* */ ++/* Redistribution and use in source and binary forms, with or */ ++/* without modification, are permitted provided that the following */ ++/* conditions are met: */ ++/* */ ++/* 1. Redistributions of source code must retain the above */ ++/* copyright notice, this list of conditions and the following */ ++/* disclaimer. */ ++/* */ ++/* 2. Redistributions in binary form must reproduce the above */ ++/* copyright notice, this list of conditions and the following */ ++/* disclaimer in the documentation and/or other materials */ ++/* provided with the distribution. */ ++/* */ ++/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ ++/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ ++/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ ++/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ ++/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ ++/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ ++/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ ++/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ ++/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ ++/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ ++/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ ++/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ ++/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ ++/* POSSIBILITY OF SUCH DAMAGE. 
*/ ++/* */ ++/* The views and conclusions contained in the software and */ ++/* documentation are those of the authors and should not be */ ++/* interpreted as representing official policies, either expressed */ ++/* or implied, of The University of Texas at Austin. */ ++/*********************************************************************/ ++ ++#define ASSEMBLER ++#include "common.h" ++ ++/************** Notes ON IBM abi and IBM assembly********************************************** ++* General registers r0 and r1 should be used internally whenever possible ++* General registers r2 to r5 should be second choice ++* General registers r12 to r15 should only be used for their standard function. ++* r0 should not be used as address disp register ++ ++#BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc ++ ##bm=r2,bn=r3, bk=r4, alpha=f0,ba=r5,bb=r6,stack[160] ,ldc=stack[168] ++offset=stack[176] ++**********************************************************************************************/ ++ ++ ++#define BM %r2 ++#define BM_CUR %r0 ++#define BN %r3 ++#define BN_CUR %r10 ++#define BK %r4 ++#define LDC_BYTE %r8 ++#define ALPHA %f0 ++#define ALPHA_VECT %v0 ++#define LOCAL_VAR1 %r9 ++#define LOCAL_VAR2 %r1 ++#define LOCAL_VAR3 %r11 ++#define A %r5 ++#define B %r6 ++#define CIJ %r7 ++#define CIJ_LOCAL %r12 ++#define OFF %r13 ++#define OFFSET %f8 ++#define ALIGN_4 .align 16 ++#define ALIGN_2 .align 8 ++#define PREFETCH_INS 1 ++ ++/**************************Include kernel helper macrosses**********************************/ ++#include "kernelMacros.S" ++ ++#if defined (TRMMKERNEL) ++ ++#define STORE_8x4 STORE_TRMM_8x4 ++#define STORE_4x4 STORE_TRMM_4x4 ++#define STORE_2x4 STORE_TRMM_2x4 ++#define STORE_1x4 STORE_TRMM_1x4 ++ ++#define STORE_8x2 STORE_TRMM_8x2 ++#define STORE_4x2 STORE_TRMM_4x2 ++#define STORE_2x2 STORE_TRMM_2x2 ++#define STORE_1x2 STORE_TRMM_1x2 ++ ++#define STORE_8x1 STORE_TRMM_8x1 ++#define STORE_4x1 STORE_TRMM_4x1 ++#define STORE_2x1 STORE_TRMM_2x1 ++#define STORE_1x1 STORE_TRMM_1x1 ++ ++#endif ++ ++/***********************************DGEMM***********************************************************/ ++ ++PROLOGUE ++#if defined(TRMMKERNEL) ++stmg %r6,%r13,40(%r15) ++#else ++stmg %r6,%r12,40(%r15) ++#endif ++lg CIJ, 160(%r15) ++lg LOCAL_VAR1, 168(%r15) ++#if defined(TRMMKERNEL) ++lg OFF,176(%r15) ++std OFFSET,32(%r15) ++ldgr OFFSET ,OFF ++#endif ++srlg BN_CUR,BN,2 ++vrepg ALPHA_VECT,ALPHA_VECT,0 /*replicate alpha which in f0*/ ++ ++sllg LDC_BYTE, LOCAL_VAR1,3 /*calculate lcd stride with bytes double=8 x<<3 */ ++#if defined(TRMMKERNEL) && !defined(LEFT) ++ /*off = -offset;*/ ++ lgdr LOCAL_VAR1,OFFSET ++ lcgr OFF,LOCAL_VAR1 ++#endif ++cijle BN_CUR,0,.LX2 ++ ++ALIGN_4 ++.LX4_BN: ++#if defined(PREFETCH_INS) ++ pfd 1, 0(A) ++ pfd 1, 256(A) ++ pfd 1, 0(B) ++ pfd 1, 256(B) ++#endif ++#if defined(TRMMKERNEL) && defined(LEFT) ++ /*off = offset;*/ ++ lgdr OFF,OFFSET ++#endif ++srlg BM_CUR,BM,3 ++lgr LOCAL_VAR3,A ++lgr CIJ_LOCAL,CIJ ++cijle BM_CUR,0,.L4x4 ++ALIGN_4 ++.L8x4_BM: /*BM_CUR LOOP */ ++ ++#if defined(TRMMKERNEL) ++ ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,8,4 ++ ++ RefreshTempBk LOCAL_VAR1,BK,OFF,8,4 ++ srl LOCAL_VAR1,2 ++ ++#else ++ srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++ lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ ++ZERO_CVEC_8x4 ++cijle LOCAL_VAR1,0,.L8x4_mod ++ ++ ++ALIGN_4 ++.L8x4_4_BK: /*BK_CUR LOOP */ ++#if defined(PREFETCH_INS) ++ pfd 1, 512(LOCAL_VAR3) ++#endif ++ 
CALC_8x4_4 LOCAL_VAR3,LOCAL_VAR2 ++#if defined(PREFETCH_INS) ++ pfd 1, 512(LOCAL_VAR2) ++#endif ++brctg LOCAL_VAR1,.L8x4_4_BK ++ ++ALIGN_4 ++.L8x4_mod: ++#if defined(TRMMKERNEL) ++ RefreshTempBk LOCAL_VAR1,BK,OFF,8,4 ++ nill LOCAL_VAR1,3 ++#else ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++#endif ++jz .L8x4_BK_Store ++ ++ALIGN_4 ++.L8x4_BK: /*BK_CUR LOOP */ ++ CALC_8x4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L8x4_BK ++ ++ALIGN_4 ++.L8x4_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_8x4 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE ++#if defined(TRMMKERNEL) ++ /*RefreshPointersAndOFF TEMP_VAL,BK_VAL,OFF_VAL,L_VAR,PTR_A,C_A*/ ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,8,4 ++#endif ++brctg BM_CUR,.L8x4_BM ++ ++ALIGN_4 ++.L4x4: ++ ++tmll BM,4 ++jz .L2x4 ++ ++ALIGN_4 ++.L4x4_BM: /*BM start*/ ++#if defined(TRMMKERNEL) ++ ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,4,4 ++ RefreshTempBk LOCAL_VAR1,BK,OFF,4,4 ++ srl LOCAL_VAR1,2 ++ ++#else ++ srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++ lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ZERO_CVEC_4x4 ++cijle LOCAL_VAR1,0,.L4x4_mod ++ ++ALIGN_4 ++.L4x4_4_BK: /*BK_CUR LOOP */ ++ CALC_4x4_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x4_4_BK ++ ++ALIGN_4 ++.L4x4_mod: ++#if defined(TRMMKERNEL) ++ RefreshTempBk LOCAL_VAR1,BK,OFF,4,4 ++ nill LOCAL_VAR1,3 ++#else ++ la LOCAL_VAR1,3(0,0) ++ NGR LOCAL_VAR1,BK /*refresh BK*/ ++#endif ++jz .L4x4_BK_Store ++ ++ALIGN_4 ++.L4x4_BK: /*BK_CUR LOOP */ ++ CALC_4x4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x4_BK ++ ++ALIGN_4 ++.L4x4_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_4x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++#if defined(TRMMKERNEL) ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,4,4 ++#endif ++ALIGN_2 ++.L2x4: ++ ++tmll BM,2 ++jz .L1x4 ++ ++ALIGN_4 ++.L2x4_BM: /*BM start*/ ++#if defined(TRMMKERNEL) ++ ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,2,4 ++ ++ RefreshTempBk LOCAL_VAR1,BK,OFF,2,4 ++ srl LOCAL_VAR1,2 ++ ++#else ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ZERO_CVEC_2x4 ++cijle LOCAL_VAR1,0,.L2x4_mod ++ ++ALIGN_4 ++.L2x4_4_BK: /*BK_CUR LOOP */ ++ CALC_2x4_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x4_4_BK ++ ++ALIGN_4 ++.L2x4_mod: ++#if defined(TRMMKERNEL) ++ RefreshTempBk LOCAL_VAR1,BK,OFF,2,4 ++ nill LOCAL_VAR1,3 ++#else ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++#endif ++jz .L2x4_BK_Store ++ ++ALIGN_4 ++.L2x4_BK: /*BK_CUR LOOP */ ++ CALC_2x4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x4_BK ++ ++ALIGN_4 ++.L2x4_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_2x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++#if defined(TRMMKERNEL) ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,2,4 ++#endif ++ ++ALIGN_4 ++.L1x4: ++ ++tmll BM,1 ++jz .Lx4_INNER_END ++ ++ALIGN_4 ++.L1x4_BM: /*BM start*/ ++#if defined(TRMMKERNEL) ++ ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,1,4 ++ RefreshTempBk LOCAL_VAR1,BK,OFF,1,4 ++ srl LOCAL_VAR1,2 ++ ++#else ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ZERO_CVEC_1x4 ++cijle LOCAL_VAR1,0,.L1x4_mod ++ ++ALIGN_4 ++.L1x4_4_BK: /*BK_CUR LOOP */ ++ CALC_1x4_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x4_4_BK ++ ++ALIGN_4 ++.L1x4_mod: ++#if defined(TRMMKERNEL) ++ 
RefreshTempBk LOCAL_VAR1,BK,OFF,1,4 ++ nill LOCAL_VAR1,3 ++#else ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++#endif ++jz .L1x4_BK_Store ++ ++ALIGN_4 ++.L1x4_BK: /*BK_CUR LOOP */ ++ CALC_1x4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x4_BK ++ ++ALIGN_4 ++.L1x4_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_1x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++#if defined(TRMMKERNEL) ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,1,4 ++#endif ++ALIGN_2 ++.Lx4_INNER_END: ++ ++ ++/*add LDC_BYTE_COPY to new*/ ++sllg LOCAL_VAR1,LDC_BYTE,2 /*multiply*4 */ ++#if defined(TRMMKERNEL) && !defined(LEFT) ++ aghi OFF,4 ++#endif ++sllg LOCAL_VAR2,BK,5 /*muyliply*4*sizeof(double) =multiply*32* 2**5 */ ++la CIJ,0(CIJ,LOCAL_VAR1) /*refresh CIJ=CIJ+LDC_BYTE*4*/ ++la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*4*sizeof(double) */ ++ ++brctg BN_CUR,.LX4_BN ++ ++/*********************************X2 SECTION************************************************/ ++ALIGN_4 ++.LX2: ++tmll BN,2 ++jz .Lx1 ++ ++ALIGN_4 ++.Lx2_BN: ++ ++#if defined(TRMMKERNEL) && defined(LEFT) ++ /*off = offset;*/ ++ lgdr OFF,OFFSET ++#endif ++ ++srlg BM_CUR,BM,3 ++lgr LOCAL_VAR3,A ++lgr CIJ_LOCAL,CIJ ++cijle BM_CUR,0,.L4x2 ++ ++ ++ALIGN_4 ++.L8x2_BM: /*BM_CUR LOOP */ ++#if defined(TRMMKERNEL) ++ ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,8,2 ++ RefreshTempBk LOCAL_VAR1,BK,OFF,8,2 ++ srl LOCAL_VAR1,2 ++ ++#else ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ZERO_CVEC_8x2 ++cijle LOCAL_VAR1,0,.L8x2_mod ++ ++ALIGN_4 ++.L8x2_4_BK: /*BK_CUR LOOP */ ++#if defined(PREFETCH_INS) ++ pfd 1, 256(LOCAL_VAR3) ++ pfd 1,64(LOCAL_VAR2) ++#endif ++ CALC_8x2_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L8x2_4_BK ++ ++ALIGN_4 ++.L8x2_mod: ++#if defined(TRMMKERNEL) ++ RefreshTempBk LOCAL_VAR1,BK,OFF,8,2 ++ nill LOCAL_VAR1,3 ++#else ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++#endif ++jz .L8x2_BK_Store ++ ++ALIGN_4 ++.L8x2_BK: /*BK_CUR LOOP */ ++ CALC_8x2 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L8x2_BK ++ ++ALIGN_4 ++.L8x2_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_8x2 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE ++#if defined(TRMMKERNEL) ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,8,2 ++#endif ++ALIGN_4 ++brctg BM_CUR,.L8x2_BM ++ ++ALIGN_2 ++.L4x2: ++ ++tmll BM,4 ++jz .L2x2 ++ ++ALIGN_4 ++.L4x2_BM: /*BM start*/ ++#if defined(TRMMKERNEL) ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,4,2 ++ RefreshTempBk LOCAL_VAR1,BK,OFF,4,2 ++ srl LOCAL_VAR1,2 ++ ++#else ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ZERO_CVEC_4x2 ++cijle LOCAL_VAR1,0,.L4x2_mod ++ ++ALIGN_4 ++.L4x2_4_BK: /*BK_CUR LOOP */ ++ CALC_4x2_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x2_4_BK ++ ++ALIGN_4 ++.L4x2_mod: ++#if defined(TRMMKERNEL) ++ RefreshTempBk LOCAL_VAR1,BK,OFF,4,2 ++ nill LOCAL_VAR1,3 ++#else ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++#endif ++jz .L4x2_BK_Store ++ ++ALIGN_4 ++.L4x2_BK: /*BK_CUR LOOP */ ++ CALC_4x2 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x2_BK ++ ++ALIGN_4 ++.L4x2_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_4x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++#if defined(TRMMKERNEL) ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,4,2 ++#endif ++ALIGN_2 ++.L2x2: ++ ++tmll BM,2 ++jz .L1x2 ++ ++ALIGN_4 ++.L2x2_BM: 
/*BM start*/ ++#if defined(TRMMKERNEL) ++ ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,2,2 ++ RefreshTempBk LOCAL_VAR1,BK,OFF,2,2 ++ srl LOCAL_VAR1,2 ++ ++#else ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ZERO_CVEC_2x2 ++cijle LOCAL_VAR1,0,.L2x2_mod ++ ++ALIGN_4 ++.L2x2_4_BK: /*BK_CUR LOOP */ ++ CALC_2x2_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x2_4_BK ++ ++ALIGN_4 ++.L2x2_mod: ++#if defined(TRMMKERNEL) ++ RefreshTempBk LOCAL_VAR1,BK,OFF,2,2 ++ nill LOCAL_VAR1,3 ++#else ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++#endif ++jz .L2x2_BK_Store ++ ++ALIGN_4 ++.L2x2_BK: /*BK_CUR LOOP */ ++ CALC_2x2 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x2_BK ++ ++ALIGN_4 ++.L2x2_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_2x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++#if defined(TRMMKERNEL) ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,2,2 ++#endif ++ ++ALIGN_2 ++.L1x2: ++ ++tmll BM,1 ++jz .Lx2_INNER_END ++ ++ALIGN_4 ++.L1x2_BM: /*BM start*/ ++#if defined(TRMMKERNEL) ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,1,2 ++ RefreshTempBk LOCAL_VAR1,BK,OFF,1,2 ++ srl LOCAL_VAR1,2 ++ ++#else ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ZERO_CVEC_1x2 ++cijle LOCAL_VAR1,0,.L1x2_mod ++ ++ALIGN_4 ++.L1x2_4_BK: /*BK_CUR LOOP */ ++ CALC_1x2_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x2_4_BK ++ ++ALIGN_4 ++.L1x2_mod: ++#if defined(TRMMKERNEL) ++ RefreshTempBk LOCAL_VAR1,BK,OFF,1,2 ++ nill LOCAL_VAR1,3 ++#else ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++#endif ++jz .L1x2_BK_Store ++ ++ALIGN_4 ++.L1x2_BK: /*BK_CUR LOOP */ ++ CALC_1x2 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x2_BK ++ ++ALIGN_4 ++.L1x2_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_1x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++#if defined(TRMMKERNEL) ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,1,2 ++#endif ++ALIGN_2 ++.Lx2_INNER_END: ++/*add LDC_BYTE_COPY to new*/ ++la LOCAL_VAR1,0(LDC_BYTE,LDC_BYTE) /*multiply*2 */ ++sllg LOCAL_VAR2,BK,4 /*muyliply*2*sizeof(double) =multiply*16* 2**4 */ ++la CIJ,0(CIJ,LOCAL_VAR1) /*refresh CIJ=CIJ+LDC_BYTE*4*/ ++#if defined(TRMMKERNEL) && !defined(LEFT) ++ aghi OFF,2 ++#endif ++la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*4*sizeof(double) */ ++ ++ ++ ++ ++/*********************************X1 SECTION************************************************/ ++ALIGN_2 ++.Lx1: ++tmll BN,1 ++jz .L_FUNC_END ++ ++ALIGN_4 ++.Lx1_BN: ++ ++#if defined(TRMMKERNEL) && defined(LEFT) ++ /*off = offset;*/ ++ lgdr OFF,OFFSET ++#endif ++srlg BM_CUR,BM,3 ++lgr LOCAL_VAR3,A ++lgr CIJ_LOCAL,CIJ ++cijle BM_CUR,0,.L4x1 ++ ++ ++ALIGN_4 ++.L8x1_BM: /*BM_CUR LOOP */ ++#if defined(TRMMKERNEL) ++ ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,8,1 ++ RefreshTempBk LOCAL_VAR1,BK,OFF,8,1 ++ srl LOCAL_VAR1,2 ++ ++#else ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ZERO_CVEC_8x1 ++cijle LOCAL_VAR1,0,.L8x1_mod ++ ++ALIGN_4 ++.L8x1_4_BK: /*BK_CUR LOOP */ ++#if defined(PREFETCH_INS) ++ pfd 1, 256(LOCAL_VAR3) ++#endif ++ CALC_8x1_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L8x1_4_BK ++ ++ALIGN_4 ++.L8x1_mod: ++#if defined(TRMMKERNEL) ++ RefreshTempBk LOCAL_VAR1,BK,OFF,8,1 ++ nill LOCAL_VAR1,3 ++#else ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK 
/*refresh BK*/ ++#endif ++jz .L8x1_BK_Store ++ ++ALIGN_4 ++.L8x1_BK: /*BK_CUR LOOP */ ++ CALC_8x1 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L8x1_BK ++ ++ALIGN_4 ++.L8x1_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_8x1 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE ++ #if defined(TRMMKERNEL) ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,8,1 ++#endif ++ALIGN_4 ++brctg BM_CUR,.L8x1_BM ++ ++ALIGN_2 ++.L4x1: ++ ++tmll BM,4 ++jz .L2x1 ++ ++ALIGN_4 ++.L4x1_BM: /*BM start*/ ++#if defined(TRMMKERNEL) ++ ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,4,1 ++ RefreshTempBk LOCAL_VAR1,BK,OFF,4,1 ++ srl LOCAL_VAR1,2 ++ ++#else ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ZERO_CVEC_4x1 ++cijle LOCAL_VAR1,0,.L4x1_mod ++ ++ALIGN_4 ++.L4x1_4_BK: /*BK_CUR LOOP */ ++ CALC_4x1_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x1_4_BK ++ ++ALIGN_4 ++.L4x1_mod: ++#if defined(TRMMKERNEL) ++ RefreshTempBk LOCAL_VAR1,BK,OFF,4,1 ++ nill LOCAL_VAR1,3 ++#else ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++#endif ++jz .L4x1_BK_Store ++ ++ALIGN_4 ++.L4x1_BK: /*BK_CUR LOOP */ ++ CALC_4x1 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L4x1_BK ++ ++ALIGN_4 ++.L4x1_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_4x1 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++ #if defined(TRMMKERNEL) ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,4,1 ++#endif ++ALIGN_2 ++.L2x1: ++ ++tmll BM,2 ++jz .L1x1 ++ ++ALIGN_4 ++.L2x1_BM: /*BM start*/ ++#if defined(TRMMKERNEL) ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,2,1 ++ RefreshTempBk LOCAL_VAR1,BK,OFF,2,1 ++ srl LOCAL_VAR1,2 ++ ++#else ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ZERO_CVEC_2x1 ++cijle LOCAL_VAR1,0,.L2x1_mod ++ ++ALIGN_4 ++.L2x1_4_BK: /*BK_CUR LOOP */ ++ CALC_2x1_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x1_4_BK ++ ++ALIGN_4 ++.L2x1_mod: ++#if defined(TRMMKERNEL) ++ RefreshTempBk LOCAL_VAR1,BK,OFF,2,1 ++ nill LOCAL_VAR1,3 ++#else ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++#endif ++jz .L2x1_BK_Store ++ ++ALIGN_4 ++.L2x1_BK: /*BK_CUR LOOP */ ++ CALC_2x1 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L2x1_BK ++ ++ALIGN_4 ++.L2x1_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_2x1 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE ++#if defined(TRMMKERNEL) ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,2,1 ++#endif ++ ++ALIGN_2 ++.L1x1: ++ ++tmll BM, 1 ++jz .Lx1_INNER_END ++ ++ALIGN_4 ++.L1x1_BM: /*BM start*/ ++#if defined(TRMMKERNEL) ++ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ ++ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,1,1 ++ RefreshTempBk LOCAL_VAR1,BK,OFF,1,1 ++ srl LOCAL_VAR1,2 ++ ++#else ++srlg LOCAL_VAR1,BK,2 /*refresh BK*/ ++lgr LOCAL_VAR2,B /*refresh BPOINT*/ ++#endif ++ZERO_CVEC_1x1 ++cijle LOCAL_VAR1,0,.L1x1_mod ++ ++ALIGN_4 ++.L1x1_4_BK: /*BK_CUR LOOP */ ++ CALC_1x1_4 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x1_4_BK ++ ++ALIGN_4 ++.L1x1_mod: ++#if defined(TRMMKERNEL) ++ RefreshTempBk LOCAL_VAR1,BK,OFF,1,1 ++ nill LOCAL_VAR1,3 ++#else ++la LOCAL_VAR1,3(0,0) ++NGR LOCAL_VAR1,BK /*refresh BK*/ ++#endif ++jz .L1x1_BK_Store ++ ++ALIGN_4 ++.L1x1_BK: /*BK_CUR LOOP */ ++ CALC_1x1 LOCAL_VAR3,LOCAL_VAR2 ++brctg LOCAL_VAR1,.L1x1_BK ++ ++ALIGN_4 ++.L1x1_BK_Store: ++/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ ++STORE_1x1 ALPHA 
,CIJ_LOCAL, LDC_BYTE ++#if defined(TRMMKERNEL) ++ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,1,1 ++#endif ++ALIGN_2 ++.Lx1_INNER_END: ++/*add LDC_BYTE_COPY to new*/ ++sllg LOCAL_VAR2,BK,3 /*muyliply*2*sizeof(double) =multiply*8* 2**3 */ ++la CIJ,0(CIJ,LDC_BYTE) /*refresh CIJ=CIJ+LDC_BYTE */ ++#if defined(TRMMKERNEL) && !defined(LEFT) ++ aghi OFF,1 ++#endif ++la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*1*sizeof(double) */ ++ ++ ++ALIGN_2 ++.L_FUNC_END: ++/*end*/ ++#if defined(TRMMKERNEL) ++ld %f8,32(%r15) ++lmg %r6,%r13,40(%r15) ++#else ++lmg %r6,%r12,40(%r15) ++#endif ++br %r14 ++.end ++ ++ ++ ++ ++ ++ ++ +diff --git a/param.h b/param.h +index 0268fb5e..d28c63a9 100644 +--- a/param.h ++++ b/param.h +@@ -2548,6 +2548,46 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + #define SYMV_P 16 + #endif + ++#if defined(Z13) ++#define SNUMOPT 2 ++#define DNUMOPT 4 ++ ++#define GEMM_DEFAULT_OFFSET_A 0 ++#define GEMM_DEFAULT_OFFSET_B 0 ++#define GEMM_DEFAULT_ALIGN 0x03fffUL ++ ++#define SGEMM_DEFAULT_UNROLL_M 2 ++#define SGEMM_DEFAULT_UNROLL_N 2 ++ ++#define DGEMM_DEFAULT_UNROLL_M 8 ++#define DGEMM_DEFAULT_UNROLL_N 4 ++ ++#define CGEMM_DEFAULT_UNROLL_M 2 ++#define CGEMM_DEFAULT_UNROLL_N 2 ++ ++#define ZGEMM_DEFAULT_UNROLL_M 2 ++#define ZGEMM_DEFAULT_UNROLL_N 2 ++ ++#define SGEMM_DEFAULT_P 128 ++ #define DGEMM_DEFAULT_P 320 ++#define CGEMM_DEFAULT_P 96 ++#define ZGEMM_DEFAULT_P 64 ++ ++#define SGEMM_DEFAULT_Q 240 ++#define DGEMM_DEFAULT_Q 384 ++#define CGEMM_DEFAULT_Q 120 ++#define ZGEMM_DEFAULT_Q 120 ++ ++#define SGEMM_DEFAULT_R 12288 ++#define DGEMM_DEFAULT_R 4096 ++#define CGEMM_DEFAULT_R 4096 ++#define ZGEMM_DEFAULT_R 4096 ++ ++ ++#define SYMV_P 16 ++#endif ++ ++ + + #ifdef GENERIC + +-- +2.12.2 + + +From b489d350a1340d4aec3d2a7f9a97a588c118d670 Mon Sep 17 00:00:00 2001 +From: Abdurrauf +Date: Wed, 4 Jan 2017 19:41:24 +0400 +Subject: [PATCH 4/6] Update README.md (cherry picked from commit + 7f2a959e3eb7ce1a91a0f685021e3be0d9ee0552) + +--- + README.md | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/README.md b/README.md +index 5428f0eb..af30a0c8 100644 +--- a/README.md ++++ b/README.md +@@ -107,9 +107,12 @@ Please read GotoBLAS_01Readme.txt + - **ARM Cortex-A57**: Experimental + + #### IBM zEnterprise System: +-- **Z13**: Double precision real number +- git checkout z13 +- make USE_TRMM=1 ++- **Z13**: blas3 for double ++``` ++ git checkout z13 ++ make USE_TRMM=1 ++``` ++ + + ### Support OS: + - **GNU/Linux** +-- +2.12.2 + + +From 0ba111288df793cafce7cb159d3a0e005cd59dfb Mon Sep 17 00:00:00 2001 +From: Zhang Xianyi +Date: Mon, 9 Jan 2017 05:48:09 -0500 +Subject: [PATCH 5/6] Add USE_TRMM=1 for IBM z13 in kernel/Makefile.L3 + +(cherry picked from commit 864e202afdc9761637b442f084f0f26039256fa4) +--- + README.md | 6 +----- + kernel/Makefile.L3 | 4 ++++ + 2 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/README.md b/README.md +index af30a0c8..1c3255fe 100644 +--- a/README.md ++++ b/README.md +@@ -107,11 +107,7 @@ Please read GotoBLAS_01Readme.txt + - **ARM Cortex-A57**: Experimental + + #### IBM zEnterprise System: +-- **Z13**: blas3 for double +-``` +- git checkout z13 +- make USE_TRMM=1 +-``` ++- **Z13**: blas3 for double + + + ### Support OS: +diff --git a/kernel/Makefile.L3 b/kernel/Makefile.L3 +index e55f153f..86e692e5 100644 +--- a/kernel/Makefile.L3 ++++ b/kernel/Makefile.L3 +@@ -36,6 +36,10 @@ ifeq ($(CORE), POWER8) + USE_TRMM = 1 + endif + ++ifeq ($(CORE), Z13) ++USE_TRMM = 1 ++endif ++ + + + +-- +2.12.2 + + 
+From 02459e22d3b8b34dbaea5d7e2e822d3c47b8cdef Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Dan=20Hor=C3=A1k?= +Date: Thu, 20 Apr 2017 21:13:41 +0200 +Subject: [PATCH 6/6] detect CPU on zArch + +(cherry picked from commit 81fed55782f0dd04649b1f0c4a44de85ac20162f) +--- + cpuid_zarch.c | 24 +++++++++++++++++++++--- + 1 file changed, 21 insertions(+), 3 deletions(-) + +diff --git a/cpuid_zarch.c b/cpuid_zarch.c +index e2e3b046..4e193542 100644 +--- a/cpuid_zarch.c ++++ b/cpuid_zarch.c +@@ -42,9 +42,27 @@ static char *cpuname_lower[] = { + + int detect(void) + { +- // return CPU_GENERIC; +- return CPU_Z13; +- ++ FILE *infile; ++ char buffer[512], *p; ++ ++ p = (char *)NULL; ++ infile = fopen("/proc/sysinfo", "r"); ++ while (fgets(buffer, sizeof(buffer), infile)){ ++ if (!strncmp("Type", buffer, 4)){ ++ p = strchr(buffer, ':') + 2; ++#if 0 ++ fprintf(stderr, "%s\n", p); ++#endif ++ break; ++ } ++ } ++ ++ fclose(infile); ++ ++ if (strstr(p, "2964")) return CPU_Z13; ++ if (strstr(p, "2965")) return CPU_Z13; ++ ++ return CPU_GENERIC; + } + + void get_libname(void) +-- +2.12.2 + diff --git a/openblas.spec b/openblas.spec index 83b3da8..5a5aee4 100644 --- a/openblas.spec +++ b/openblas.spec @@ -15,7 +15,7 @@ Name: openblas Version: 0.2.19 -Release: 10%{?dist} +Release: 11%{?dist} Summary: An optimized BLAS library based on GotoBLAS2 Group: Development/Libraries License: BSD @@ -31,6 +31,8 @@ Patch2: openblas-0.2.15-constructor.patch Patch3: openblas-0.2.19-tests.patch # From https://github.com/xianyi/OpenBLAS/issues/1078#issuecomment-279527810 Patch4: openblas-0.2.19-fix_register_clobbers.patch +# Backported support for s390x from the develop branch +Patch5: openblas-0.2.19-s390x.patch BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) @@ -237,6 +239,7 @@ cd OpenBLAS-%{version} %endif %patch3 -p1 -b .tests %patch4 -p1 -b .register_clobbers +%patch5 -p1 -b .s390x # Fix source permissions find -name \*.f -exec chmod 644 {} \; @@ -426,6 +429,9 @@ suffix="_power8" %ifarch aarch64 suffix="_armv8" %endif +%ifarch s390x +suffix="_zarch_generic" +%endif slibname=`basename %{buildroot}%{_libdir}/libopenblas${suffix}-*.so .so` mv %{buildroot}%{_libdir}/${slibname}.a %{buildroot}%{_libdir}/lib%{name}.a if [[ "$suffix" != "" ]]; then @@ -649,6 +655,9 @@ rm -rf %{buildroot} %endif %changelog +* Mon May 29 2017 Dan Horák - 0.2.19-11 +- add generic s390x support (#1442048) + * Mon Mar 20 2017 Orion Poplawski - 0.2.19-10 - Drop openblas-srpm-macros version requirement From 5f810a91d1ecadba58777a05a9d99b9bce9ad0e9 Mon Sep 17 00:00:00 2001 From: Fedora Release Engineering Date: Thu, 27 Jul 2017 01:44:08 +0000 Subject: [PATCH 6/9] - Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild --- openblas.spec | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/openblas.spec b/openblas.spec index 5a5aee4..dd4199c 100644 --- a/openblas.spec +++ b/openblas.spec @@ -15,7 +15,7 @@ Name: openblas Version: 0.2.19 -Release: 11%{?dist} +Release: 12%{?dist} Summary: An optimized BLAS library based on GotoBLAS2 Group: Development/Libraries License: BSD @@ -655,6 +655,9 @@ rm -rf %{buildroot} %endif %changelog +* Thu Jul 27 2017 Fedora Release Engineering - 0.2.19-12 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild + * Mon May 29 2017 Dan Horák - 0.2.19-11 - add generic s390x support (#1442048) From 209efbaefa9dfca2e6cb5ec710edf6c6c0f47ca5 Mon Sep 17 00:00:00 2001 From: Susi Lehtola Date: Sat, 29 Jul 2017 03:53:39 +0200 Subject: [PATCH 7/9] Update to 0.2.20. 
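
Both 0.2.19 backports (the power8 register-clobber fix and the s390x support) are removed below, presumably because the 0.2.20 upstream release already carries them. A typical tarball refresh, assuming the usual Fedora tooling and that the download keeps Source0's v%{version}.tar.gz naming:

    spectool -g openblas.spec          # fetch v0.2.20.tar.gz per Source0
    fedpkg new-sources v0.2.20.tar.gz  # upload it and update the sources file
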
--- openblas-0.2.19-fix_register_clobbers.patch | 7841 ------------------- openblas-0.2.19-s390x.patch | 4152 ---------- openblas.spec | 15 +- sources | 2 +- 4 files changed, 7 insertions(+), 12003 deletions(-) delete mode 100644 openblas-0.2.19-fix_register_clobbers.patch delete mode 100644 openblas-0.2.19-s390x.patch diff --git a/openblas-0.2.19-fix_register_clobbers.patch b/openblas-0.2.19-fix_register_clobbers.patch deleted file mode 100644 index b51d646..0000000 --- a/openblas-0.2.19-fix_register_clobbers.patch +++ /dev/null @@ -1,7841 +0,0 @@ -From 1e70600316ab080d80e318f32868c12eb7d1f2da Mon Sep 17 00:00:00 2001 -From: Alan Modra -Date: Thu, 9 Feb 2017 08:41:51 +1030 -Subject: [PATCH] Fix power8 asm() - -Lots of issues here. -- The vsx regs weren't listed as clobbered. -- Poor choice of vsx regs, which along with the lack of clobbers led to - trashing v0..v21 and fr14..fr23. Ideally you'd let gcc choose all - temp vsx regs, but asms currently have a limit of 30 i/o parms. -- Other regs were clobbered unnecessarily, seemingly in an attempt to - clobber inputs, with gcc-7 complaining about the clobber of r2. - (Changed inputs should be also listed as outputs or as an i/o.) -- "r" constraint used instead of "b" for gprs used in insns where the - r0 encoding means zero rather than r0. -- There were unused asm inputs too. -- All memory was clobbered rather than hooking up memory outputs with - proper memory constraints, and that and the lack of proper memory - input constraints meant the asms needed to be volatile and their - containing function noinline. -- Some parameters were being passed unnecessarily via memory. -- When a copy of a pointer input parm was needed, the value passed to - the asm was incremented in C and decremented in asm, rather than - using i/o parms, an early clobber constraint, or a temp output reg - copied in the asm. In most cases a small change to assembly could - be made that obviated the need for the extra pointer. -- A number of functions did not compute the final sum or dot-product - in assembly, instead using scalar code in C. -- dcbt was bogus. - -I've also fixed formatting of the asm. - -diff --git a/kernel/power/casum.c b/kernel/power/casum.c -index aeed0ca..d110858 100644 ---- a/kernel/power/casum.c -+++ b/kernel/power/casum.c -@@ -53,7 +53,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #ifndef HAVE_KERNEL_16 - --static void casum_kernel_16(BLASLONG n, FLOAT *x1, FLOAT *svec) -+static FLOAT casum_kernel_16(BLASLONG n, FLOAT *x1) - { - - BLASLONG i=0; -@@ -92,11 +92,7 @@ static void casum_kernel_16(BLASLONG n, FLOAT *x1, FLOAT *svec) - - } - -- svec[0] = sum0+sum1+sum2+sum3; -- svec[1] = 0.0; -- svec[2] = 0.0; -- svec[3] = 0.0; -- -+ return sum0+sum1+sum2+sum3; - } - - #endif -@@ -106,7 +102,6 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) - BLASLONG i=0; - BLASLONG ip=0; - FLOAT sumf = 0.0; -- FLOAT svec[4] __attribute__ ((aligned (16)));; - BLASLONG n1; - BLASLONG inc_x2; - -@@ -119,8 +114,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) - if ( n1 > 0 ) - { - -- casum_kernel_16(n1, x, svec); -- sumf = svec[0] + svec[1]+svec[2]+svec[3]; -+ sumf = casum_kernel_16(n1, x); - i=n1; - ip = 2 * n1; - } -diff --git a/kernel/power/casum_microk_power8.c b/kernel/power/casum_microk_power8.c -index cb50234..38a1143 100644 ---- a/kernel/power/casum_microk_power8.c -+++ b/kernel/power/casum_microk_power8.c -@@ -34,144 +34,145 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- **************************************************************************************/ - - #define HAVE_KERNEL_16 1 --static void casum_kernel_16( BLASLONG n, FLOAT *x, FLOAT *svec) __attribute__ ((noinline)); - --static void casum_kernel_16( BLASLONG n, FLOAT *x, FLOAT *svec) -+static float casum_kernel_16 (long n, float *x) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- BLASLONG pre = 384; -- -- __asm__ __volatile__ -- ( -- -- "dcbt %2 , %4 \n\t" -- -- "xxlxor 32,32,32 \n\t" -- "xxlxor 33,33,33 \n\t" -- "xxlxor 34,34,34 \n\t" -- "xxlxor 35,35,35 \n\t" -- "xxlxor 36,36,36 \n\t" -- "xxlxor 37,37,37 \n\t" -- "xxlxor 38,38,38 \n\t" -- "xxlxor 39,39,39 \n\t" -- -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -16 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2 , %4 \n\t" -- -- "xvabssp 48, 40 \n\t" -- "xvabssp 49, 41 \n\t" -- "xvabssp 50, 42 \n\t" -- "xvabssp 51, 43 \n\t" -- -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- -- "xvabssp 52, 44 \n\t" -- "xvabssp 53, 45 \n\t" -- -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- -- "xvabssp 54, 46 \n\t" -- "xvabssp 55, 47 \n\t" -- -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- -- "xvaddsp 32, 32, 48 \n\t" -- "xvaddsp 33, 33, 49 \n\t" -- -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- -- "xvaddsp 34, 34, 50 \n\t" -- "xvaddsp 35, 35, 51 \n\t" -- "addi %2, %2, 128 \n\t" -- "xvaddsp 36, 36, 52 \n\t" -- "xvaddsp 37, 37, 53 \n\t" -- "addic. 
%0 , %0 , -16 \n\t" -- "xvaddsp 38, 38, 54 \n\t" -- "xvaddsp 39, 39, 55 \n\t" -- -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- -- "xvabssp 48, 40 \n\t" -- "xvabssp 49, 41 \n\t" -- "xvabssp 50, 42 \n\t" -- "xvabssp 51, 43 \n\t" -- "xvabssp 52, 44 \n\t" -- "xvabssp 53, 45 \n\t" -- "xvabssp 54, 46 \n\t" -- "xvabssp 55, 47 \n\t" -- -- "xvaddsp 32, 32, 48 \n\t" -- "xvaddsp 33, 33, 49 \n\t" -- "xvaddsp 34, 34, 50 \n\t" -- "xvaddsp 35, 35, 51 \n\t" -- "xvaddsp 36, 36, 52 \n\t" -- "xvaddsp 37, 37, 53 \n\t" -- "xvaddsp 38, 38, 54 \n\t" -- "xvaddsp 39, 39, 55 \n\t" -- -- "xvaddsp 32, 32, 33 \n\t" -- "xvaddsp 34, 34, 35 \n\t" -- "xvaddsp 36, 36, 37 \n\t" -- "xvaddsp 38, 38, 39 \n\t" -- -- "xvaddsp 32, 32, 34 \n\t" -- "xvaddsp 36, 36, 38 \n\t" -- -- "xvaddsp 32, 32, 36 \n\t" -- -- -- "stxvw4x 32, 0, %3 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (n), // 1 -- "r" (x1), // 2 -- "r" (svec), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2", "memory" -- ); -- --} -- -- -+ float sum; -+ __vector float t0; -+ __vector float t1; -+ __vector float t2; -+ __vector float t3; -+ -+ __asm__ -+ ( -+ "dcbt 0, %2 \n\t" -+ -+ "xxlxor 32, 32, 32 \n\t" -+ "xxlxor 33, 33, 33 \n\t" -+ "xxlxor 34, 34, 34 \n\t" -+ "xxlxor 35, 35, 35 \n\t" -+ "xxlxor 36, 36, 36 \n\t" -+ "xxlxor 37, 37, 37 \n\t" -+ "xxlxor 38, 38, 38 \n\t" -+ "xxlxor 39, 39, 39 \n\t" -+ -+ "lxvw4x 40, 0, %2 \n\t" -+ "lxvw4x 41, %8, %2 \n\t" -+ "lxvw4x 42, %9, %2 \n\t" -+ "lxvw4x 43, %10, %2 \n\t" -+ "lxvw4x 44, %11, %2 \n\t" -+ "lxvw4x 45, %12, %2 \n\t" -+ "lxvw4x 46, %13, %2 \n\t" -+ "lxvw4x 47, %14, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. %1, %1, -16 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "xvabssp 48, 40 \n\t" -+ "xvabssp 49, 41 \n\t" -+ "xvabssp 50, 42 \n\t" -+ "xvabssp 51, 43 \n\t" -+ -+ "lxvw4x 40, 0, %2 \n\t" -+ "lxvw4x 41, %8, %2 \n\t" -+ -+ "xvabssp %x3, 44 \n\t" -+ "xvabssp %x4, 45 \n\t" -+ -+ "lxvw4x 42, %9, %2 \n\t" -+ "lxvw4x 43, %10, %2 \n\t" -+ -+ "xvabssp %x5, 46 \n\t" -+ "xvabssp %x6, 47 \n\t" -+ -+ "lxvw4x 44, %11, %2 \n\t" -+ "lxvw4x 45, %12, %2 \n\t" -+ -+ "xvaddsp 32, 32, 48 \n\t" -+ "xvaddsp 33, 33, 49 \n\t" -+ -+ "lxvw4x 46, %13, %2 \n\t" -+ "lxvw4x 47, %14, %2 \n\t" -+ -+ "xvaddsp 34, 34, 50 \n\t" -+ "xvaddsp 35, 35, 51 \n\t" -+ "addi %2, %2, 128 \n\t" -+ "xvaddsp 36, 36, %x3 \n\t" -+ "xvaddsp 37, 37, %x4 \n\t" -+ "addic. 
%1, %1, -16 \n\t" -+ "xvaddsp 38, 38, %x5 \n\t" -+ "xvaddsp 39, 39, %x6 \n\t" -+ -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvabssp 48, 40 \n\t" -+ "xvabssp 49, 41 \n\t" -+ "xvabssp 50, 42 \n\t" -+ "xvabssp 51, 43 \n\t" -+ "xvabssp %x3, 44 \n\t" -+ "xvabssp %x4, 45 \n\t" -+ "xvabssp %x5, 46 \n\t" -+ "xvabssp %x6, 47 \n\t" -+ -+ "xvaddsp 32, 32, 48 \n\t" -+ "xvaddsp 33, 33, 49 \n\t" -+ "xvaddsp 34, 34, 50 \n\t" -+ "xvaddsp 35, 35, 51 \n\t" -+ "xvaddsp 36, 36, %x3 \n\t" -+ "xvaddsp 37, 37, %x4 \n\t" -+ "xvaddsp 38, 38, %x5 \n\t" -+ "xvaddsp 39, 39, %x6 \n\t" -+ -+ "xvaddsp 32, 32, 33 \n\t" -+ "xvaddsp 34, 34, 35 \n\t" -+ "xvaddsp 36, 36, 37 \n\t" -+ "xvaddsp 38, 38, 39 \n\t" -+ -+ "xvaddsp 32, 32, 34 \n\t" -+ "xvaddsp 36, 36, 38 \n\t" -+ -+ "xvaddsp 32, 32, 36 \n\t" -+ -+ "xxsldwi 33, 32, 32, 2 \n\t" -+ "xvaddsp 32, 32, 33 \n\t" -+ -+ "xxsldwi 33, 32, 32, 1 \n\t" -+ "xvaddsp 32, 32, 33 \n\t" -+ -+ "xscvspdp %0, 32 \n" -+ -+ "#n=%1 x=%3=%2 sum=%0 o16=%8 o32=%9 o48=%10 o64=%11 o80=%12 o96=%13 o112=%14\n" -+ "#t0=%x3 t1=%x4 t2=%x5 t3=%x6" -+ : -+ "=f" (sum), // 0 -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "=wa" (t0), // 3 -+ "=wa" (t1), // 4 -+ "=wa" (t2), // 5 -+ "=wa" (t3) // 6 -+ : -+ "m" (*x), -+ "b" (16), // 8 -+ "b" (32), // 9 -+ "b" (48), // 10 -+ "b" (64), // 11 -+ "b" (80), // 12 -+ "b" (96), // 13 -+ "b" (112) // 14 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51" -+ ); -+ -+ return sum; -+} -diff --git a/kernel/power/ccopy_microk_power8.c b/kernel/power/ccopy_microk_power8.c -index 95b3559..b2b1bea 100644 ---- a/kernel/power/ccopy_microk_power8.c -+++ b/kernel/power/ccopy_microk_power8.c -@@ -35,140 +35,121 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #define HAVE_KERNEL_32 1 - --static void ccopy_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) __attribute__ ((noinline)); -- --static void ccopy_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) -+static void ccopy_kernel_32 (long n, float *x, float *y) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *y1=y; -- BLASLONG pre = 384; -- BLASLONG alpha=0; -- -- __asm__ __volatile__ -- ( -- -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "lxvw4x 50, 0, %2 \n\t" -- "lxvw4x 51, %5, %2 \n\t" -- "lxvw4x 52, %6, %2 \n\t" -- "lxvw4x 53, %7, %2 \n\t" -- "lxvw4x 54, %8, %2 \n\t" -- "lxvw4x 55, %9, %2 \n\t" -- "lxvw4x 56, %10, %2 \n\t" -- "lxvw4x 57, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "addic. 
%0 , %0 , -32 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "stxvw4x 40, 0, %1 \n\t" -- "stxvw4x 41, %5, %1 \n\t" -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- "stxvw4x 42, %6, %1 \n\t" -- "stxvw4x 43, %7, %1 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- "stxvw4x 44, %8, %1 \n\t" -- "stxvw4x 45, %9, %1 \n\t" -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- "stxvw4x 46, %10, %1 \n\t" -- "stxvw4x 47, %11, %1 \n\t" -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- -- -- "addi %1, %1, 128 \n\t" -- "addi %2, %2, 128 \n\t" -- -- "stxvw4x 50, 0, %1 \n\t" -- "stxvw4x 51, %5, %1 \n\t" -- "lxvw4x 50, 0, %2 \n\t" -- "lxvw4x 51, %5, %2 \n\t" -- "stxvw4x 52, %6, %1 \n\t" -- "stxvw4x 53, %7, %1 \n\t" -- "lxvw4x 52, %6, %2 \n\t" -- "lxvw4x 53, %7, %2 \n\t" -- "stxvw4x 54, %8, %1 \n\t" -- "stxvw4x 55, %9, %1 \n\t" -- "lxvw4x 54, %8, %2 \n\t" -- "lxvw4x 55, %9, %2 \n\t" -- "stxvw4x 56, %10, %1 \n\t" -- "stxvw4x 57, %11, %1 \n\t" -- "lxvw4x 56, %10, %2 \n\t" -- "lxvw4x 57, %11, %2 \n\t" -- -- "addi %1, %1, 128 \n\t" -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -32 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "stxvw4x 40, 0, %1 \n\t" -- "stxvw4x 41, %5, %1 \n\t" -- "stxvw4x 42, %6, %1 \n\t" -- "stxvw4x 43, %7, %1 \n\t" -- "stxvw4x 44, %8, %1 \n\t" -- "stxvw4x 45, %9, %1 \n\t" -- "stxvw4x 46, %10, %1 \n\t" -- "stxvw4x 47, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "stxvw4x 50, 0, %1 \n\t" -- "stxvw4x 51, %5, %1 \n\t" -- "stxvw4x 52, %6, %1 \n\t" -- "stxvw4x 53, %7, %1 \n\t" -- "stxvw4x 54, %8, %1 \n\t" -- "stxvw4x 55, %9, %1 \n\t" -- "stxvw4x 56, %10, %1 \n\t" -- "stxvw4x 57, %11, %1 \n\t" -- -- -- : -- : -- "r" (i), // 0 -- "r" (y1), // 1 -- "r" (x1), // 2 -- "r" (alpha), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "memory" -- ); -- --} -- -- -+ __asm__ -+ ( -+ "lxvw4x 32, 0, %2 \n\t" -+ "lxvw4x 33, %5, %2 \n\t" -+ "lxvw4x 34, %6, %2 \n\t" -+ "lxvw4x 35, %7, %2 \n\t" -+ "lxvw4x 36, %8, %2 \n\t" -+ "lxvw4x 37, %9, %2 \n\t" -+ "lxvw4x 38, %10, %2 \n\t" -+ "lxvw4x 39, %11, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "lxvw4x 40, 0, %2 \n\t" -+ "lxvw4x 41, %5, %2 \n\t" -+ "lxvw4x 42, %6, %2 \n\t" -+ "lxvw4x 43, %7, %2 \n\t" -+ "lxvw4x 44, %8, %2 \n\t" -+ "lxvw4x 45, %9, %2 \n\t" -+ "lxvw4x 46, %10, %2 \n\t" -+ "lxvw4x 47, %11, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. 
%1, %1, -32 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "stxvw4x 32, 0, %3 \n\t" -+ "stxvw4x 33, %5, %3 \n\t" -+ "lxvw4x 32, 0, %2 \n\t" -+ "lxvw4x 33, %5, %2 \n\t" -+ "stxvw4x 34, %6, %3 \n\t" -+ "stxvw4x 35, %7, %3 \n\t" -+ "lxvw4x 34, %6, %2 \n\t" -+ "lxvw4x 35, %7, %2 \n\t" -+ "stxvw4x 36, %8, %3 \n\t" -+ "stxvw4x 37, %9, %3 \n\t" -+ "lxvw4x 36, %8, %2 \n\t" -+ "lxvw4x 37, %9, %2 \n\t" -+ "stxvw4x 38, %10, %3 \n\t" -+ "stxvw4x 39, %11, %3 \n\t" -+ "lxvw4x 38, %10, %2 \n\t" -+ "lxvw4x 39, %11, %2 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ "addi %2, %2, 128 \n\t" -+ -+ "stxvw4x 40, 0, %3 \n\t" -+ "stxvw4x 41, %5, %3 \n\t" -+ "lxvw4x 40, 0, %2 \n\t" -+ "lxvw4x 41, %5, %2 \n\t" -+ "stxvw4x 42, %6, %3 \n\t" -+ "stxvw4x 43, %7, %3 \n\t" -+ "lxvw4x 42, %6, %2 \n\t" -+ "lxvw4x 43, %7, %2 \n\t" -+ "stxvw4x 44, %8, %3 \n\t" -+ "stxvw4x 45, %9, %3 \n\t" -+ "lxvw4x 44, %8, %2 \n\t" -+ "lxvw4x 45, %9, %2 \n\t" -+ "stxvw4x 46, %10, %3 \n\t" -+ "stxvw4x 47, %11, %3 \n\t" -+ "lxvw4x 46, %10, %2 \n\t" -+ "lxvw4x 47, %11, %2 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. %1, %1, -32 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "stxvw4x 32, 0, %3 \n\t" -+ "stxvw4x 33, %5, %3 \n\t" -+ "stxvw4x 34, %6, %3 \n\t" -+ "stxvw4x 35, %7, %3 \n\t" -+ "stxvw4x 36, %8, %3 \n\t" -+ "stxvw4x 37, %9, %3 \n\t" -+ "stxvw4x 38, %10, %3 \n\t" -+ "stxvw4x 39, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "stxvw4x 40, 0, %3 \n\t" -+ "stxvw4x 41, %5, %3 \n\t" -+ "stxvw4x 42, %6, %3 \n\t" -+ "stxvw4x 43, %7, %3 \n\t" -+ "stxvw4x 44, %8, %3 \n\t" -+ "stxvw4x 45, %9, %3 \n\t" -+ "stxvw4x 46, %10, %3 \n\t" -+ "stxvw4x 47, %11, %3 \n" -+ -+ "#n=%1 x=%4=%2 y=%0=%3 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" -+ : -+ "=m" (*y), -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "+b" (y) // 3 -+ : -+ "m" (*x), -+ "b" (16), // 5 -+ "b" (32), // 6 -+ "b" (48), // 7 -+ "b" (64), // 8 -+ "b" (80), // 9 -+ "b" (96), // 10 -+ "b" (112) // 11 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47" -+ ); -+} -diff --git a/kernel/power/cswap_microk_power8.c b/kernel/power/cswap_microk_power8.c -index 90ab59c..1dd03dc 100644 ---- a/kernel/power/cswap_microk_power8.c -+++ b/kernel/power/cswap_microk_power8.c -@@ -35,146 +35,124 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - #define HAVE_KERNEL_32 1 - --static void cswap_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) __attribute__ ((noinline)); -- --static void cswap_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) -+static void cswap_kernel_32 (long n, float *x, float *y) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *y1=y; -- FLOAT *x2=x+1; -- FLOAT *y2=y+1; -- BLASLONG pre = 384; -- BLASLONG alpha=0; -- -- __asm__ __volatile__ -- ( -- -- "addi %3, %3, -4 \n\t" -- "addi %4, %4, -4 \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "lxvw4x 32, 0, %2 \n\t" -- "lxvw4x 33, %5, %2 \n\t" -- "lxvw4x 34, %6, %2 \n\t" -- "lxvw4x 35, %7, %2 \n\t" -- "lxvw4x 36, %8, %2 \n\t" -- "lxvw4x 37, %9, %2 \n\t" -- "lxvw4x 38, %10, %2 \n\t" -- "lxvw4x 39, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "lxvw4x 48, 0, %1 \n\t" -- "lxvw4x 49, %5, %1 \n\t" -- "lxvw4x 50, %6, %1 \n\t" -- "lxvw4x 51, %7, %1 \n\t" -- "lxvw4x 52, %8, %1 \n\t" -- "lxvw4x 53, %9, %1 \n\t" -- "lxvw4x 54, %10, %1 \n\t" -- "lxvw4x 55, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "lxvw4x 56, 0, %1 \n\t" -- "lxvw4x 57, %5, %1 \n\t" -- "lxvw4x 58, %6, %1 \n\t" -- "lxvw4x 59, %7, %1 \n\t" -- "lxvw4x 60, %8, %1 \n\t" -- "lxvw4x 61, %9, %1 \n\t" -- "lxvw4x 62, %10, %1 \n\t" -- "lxvw4x 63, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "stxvw4x 32, 0, %3 \n\t" -- "stxvw4x 33, %5, %3 \n\t" -- "stxvw4x 34, %6, %3 \n\t" -- "stxvw4x 35, %7, %3 \n\t" -- "stxvw4x 36, %8, %3 \n\t" -- "stxvw4x 37, %9, %3 \n\t" -- "stxvw4x 38, %10, %3 \n\t" -- "stxvw4x 39, %11, %3 \n\t" -- -- "addi %3, %3, 128 \n\t" -- -- "stxvw4x 40, 0, %3 \n\t" -- "stxvw4x 41, %5, %3 \n\t" -- "stxvw4x 42, %6, %3 \n\t" -- "stxvw4x 43, %7, %3 \n\t" -- "stxvw4x 44, %8, %3 \n\t" -- "stxvw4x 45, %9, %3 \n\t" -- "stxvw4x 46, %10, %3 \n\t" -- "stxvw4x 47, %11, %3 \n\t" -- -- "addi %3, %3, 128 \n\t" -- -- "stxvw4x 48, 0, %4 \n\t" -- "stxvw4x 49, %5, %4 \n\t" -- "stxvw4x 50, %6, %4 \n\t" -- "stxvw4x 51, %7, %4 \n\t" -- "stxvw4x 52, %8, %4 \n\t" -- "stxvw4x 53, %9, %4 \n\t" -- "stxvw4x 54, %10, %4 \n\t" -- "stxvw4x 55, %11, %4 \n\t" -- -- "addi %4, %4, 128 \n\t" -- -- "stxvw4x 56, 0, %4 \n\t" -- "stxvw4x 57, %5, %4 \n\t" -- "stxvw4x 58, %6, %4 \n\t" -- "stxvw4x 59, %7, %4 \n\t" -- "stxvw4x 60, %8, %4 \n\t" -- "stxvw4x 61, %9, %4 \n\t" -- "stxvw4x 62, %10, %4 \n\t" -- "stxvw4x 63, %11, %4 \n\t" -- -- "addi %4, %4, 128 \n\t" -- -- "addic. 
%0 , %0 , -32 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (y1), // 1 -- "r" (x1), // 2 -- "r" (y2), // 3 -- "r" (x2), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "%3", "%4", "memory" -- ); -- --} -- -- -+ __asm__ -+ ( -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "lxvw4x 32, 0, %4 \n\t" -+ "lxvw4x 33, %5, %4 \n\t" -+ "lxvw4x 34, %6, %4 \n\t" -+ "lxvw4x 35, %7, %4 \n\t" -+ "lxvw4x 36, %8, %4 \n\t" -+ "lxvw4x 37, %9, %4 \n\t" -+ "lxvw4x 38, %10, %4 \n\t" -+ "lxvw4x 39, %11, %4 \n\t" -+ -+ "addi %4, %4, 128 \n\t" -+ -+ "lxvw4x 40, 0, %4 \n\t" -+ "lxvw4x 41, %5, %4 \n\t" -+ "lxvw4x 42, %6, %4 \n\t" -+ "lxvw4x 43, %7, %4 \n\t" -+ "lxvw4x 44, %8, %4 \n\t" -+ "lxvw4x 45, %9, %4 \n\t" -+ "lxvw4x 46, %10, %4 \n\t" -+ "lxvw4x 47, %11, %4 \n\t" -+ -+ "addi %4, %4, -128 \n\t" -+ -+ "lxvw4x 48, 0, %3 \n\t" -+ "lxvw4x 49, %5, %3 \n\t" -+ "lxvw4x 50, %6, %3 \n\t" -+ "lxvw4x 51, %7, %3 \n\t" -+ "lxvw4x 0, %8, %3 \n\t" -+ "lxvw4x 1, %9, %3 \n\t" -+ "lxvw4x 2, %10, %3 \n\t" -+ "lxvw4x 3, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "lxvw4x 4, 0, %3 \n\t" -+ "lxvw4x 5, %5, %3 \n\t" -+ "lxvw4x 6, %6, %3 \n\t" -+ "lxvw4x 7, %7, %3 \n\t" -+ "lxvw4x 8, %8, %3 \n\t" -+ "lxvw4x 9, %9, %3 \n\t" -+ "lxvw4x 10, %10, %3 \n\t" -+ "lxvw4x 11, %11, %3 \n\t" -+ -+ "addi %3, %3, -128 \n\t" -+ -+ "stxvw4x 32, 0, %3 \n\t" -+ "stxvw4x 33, %5, %3 \n\t" -+ "stxvw4x 34, %6, %3 \n\t" -+ "stxvw4x 35, %7, %3 \n\t" -+ "stxvw4x 36, %8, %3 \n\t" -+ "stxvw4x 37, %9, %3 \n\t" -+ "stxvw4x 38, %10, %3 \n\t" -+ "stxvw4x 39, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "stxvw4x 40, 0, %3 \n\t" -+ "stxvw4x 41, %5, %3 \n\t" -+ "stxvw4x 42, %6, %3 \n\t" -+ "stxvw4x 43, %7, %3 \n\t" -+ "stxvw4x 44, %8, %3 \n\t" -+ "stxvw4x 45, %9, %3 \n\t" -+ "stxvw4x 46, %10, %3 \n\t" -+ "stxvw4x 47, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "stxvw4x 48, 0, %4 \n\t" -+ "stxvw4x 49, %5, %4 \n\t" -+ "stxvw4x 50, %6, %4 \n\t" -+ "stxvw4x 51, %7, %4 \n\t" -+ "stxvw4x 0, %8, %4 \n\t" -+ "stxvw4x 1, %9, %4 \n\t" -+ "stxvw4x 2, %10, %4 \n\t" -+ "stxvw4x 3, %11, %4 \n\t" -+ -+ "addi %4, %4, 128 \n\t" -+ -+ "stxvw4x 4, 0, %4 \n\t" -+ "stxvw4x 5, %5, %4 \n\t" -+ "stxvw4x 6, %6, %4 \n\t" -+ "stxvw4x 7, %7, %4 \n\t" -+ "stxvw4x 8, %8, %4 \n\t" -+ "stxvw4x 9, %9, %4 \n\t" -+ "stxvw4x 10, %10, %4 \n\t" -+ "stxvw4x 11, %11, %4 \n\t" -+ -+ "addi %4, %4, 128 \n\t" -+ -+ "addic. %2, %2, -32 \n\t" -+ "bgt 1b \n" -+ -+ "#n=%2 x=%0=%3 y=%1=%4 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" -+ : -+ "+m" (*x), -+ "+m" (*y), -+ "+r" (n), // 2 -+ "+b" (x), // 3 -+ "+b" (y) // 4 -+ : -+ "b" (16), // 5 -+ "b" (32), // 6 -+ "b" (48), // 7 -+ "b" (64), // 8 -+ "b" (80), // 9 -+ "b" (96), // 10 -+ "b" (112) // 11 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51","vs0","vs1","vs2","vs3", -+ "vs4","vs5","vs6","vs7","vs8","vs9","vs10","vs11" -+ ); -+} -diff --git a/kernel/power/dasum.c b/kernel/power/dasum.c -index 77f5345..73962c2 100644 ---- a/kernel/power/dasum.c -+++ b/kernel/power/dasum.c -@@ -42,7 +42,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #else - --#define ABS fabsf -+#error supports double only - - #endif - -@@ -53,7 +53,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - #ifndef HAVE_KERNEL_16 - --static void dasum_kernel_16(BLASLONG n, FLOAT *x1, FLOAT *svec) -+static FLOAT dasum_kernel_16(BLASLONG n, FLOAT *x1) - { - - BLASLONG i=0; -@@ -92,9 +92,7 @@ static void dasum_kernel_16(BLASLONG n, FLOAT *x1, FLOAT *svec) - - } - -- svec[0] = sum0+sum1+sum2+sum3; -- svec[1] = 0.0; -- -+ return sum0+sum1+sum2+sum3; - } - - #endif -@@ -103,7 +101,6 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) - { - BLASLONG i=0; - FLOAT sumf = 0.0; -- FLOAT svec[2] __attribute__ ((aligned (16)));; - BLASLONG n1; - - if (n <= 0 || inc_x <= 0) return(sumf); -@@ -115,8 +112,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) - if ( n1 > 0 ) - { - -- dasum_kernel_16(n1, x, svec); -- sumf = svec[0] + svec[1]; -+ sumf = dasum_kernel_16(n1, x); - i=n1; - } - -diff --git a/kernel/power/dasum_microk_power8.c b/kernel/power/dasum_microk_power8.c -index cc38c4f..880d7d2 100644 ---- a/kernel/power/dasum_microk_power8.c -+++ b/kernel/power/dasum_microk_power8.c -@@ -34,144 +34,142 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - **************************************************************************************/ - - #define HAVE_KERNEL_16 1 --static void dasum_kernel_16( BLASLONG n, FLOAT *x, FLOAT *svec) __attribute__ ((noinline)); - --static void dasum_kernel_16( BLASLONG n, FLOAT *x, FLOAT *svec) -+static double dasum_kernel_16 (long n, double *x) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- BLASLONG pre = 384; -- -- __asm__ __volatile__ -- ( -- -- "dcbt %2 , %4 \n\t" -- -- "xxlxor 32,32,32 \n\t" -- "xxlxor 33,33,33 \n\t" -- "xxlxor 34,34,34 \n\t" -- "xxlxor 35,35,35 \n\t" -- "xxlxor 36,36,36 \n\t" -- "xxlxor 37,37,37 \n\t" -- "xxlxor 38,38,38 \n\t" -- "xxlxor 39,39,39 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -16 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2 , %4 \n\t" -- -- "xvabsdp 48, 40 \n\t" -- "xvabsdp 49, 41 \n\t" -- "xvabsdp 50, 42 \n\t" -- "xvabsdp 51, 43 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- -- "xvabsdp 52, 44 \n\t" -- "xvabsdp 53, 45 \n\t" -- -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- -- "xvabsdp 54, 46 \n\t" -- "xvabsdp 55, 47 \n\t" -- -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- -- "xvadddp 32, 32, 48 \n\t" -- "xvadddp 33, 33, 49 \n\t" -- -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "xvadddp 34, 34, 50 \n\t" -- "xvadddp 35, 35, 51 \n\t" -- "addi %2, %2, 128 \n\t" -- "xvadddp 36, 36, 52 \n\t" -- "xvadddp 37, 37, 53 \n\t" -- "addic. 
%0 , %0 , -16 \n\t" -- "xvadddp 38, 38, 54 \n\t" -- "xvadddp 39, 39, 55 \n\t" -- -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- -- "xvabsdp 48, 40 \n\t" -- "xvabsdp 49, 41 \n\t" -- "xvabsdp 50, 42 \n\t" -- "xvabsdp 51, 43 \n\t" -- "xvabsdp 52, 44 \n\t" -- "xvabsdp 53, 45 \n\t" -- "xvabsdp 54, 46 \n\t" -- "xvabsdp 55, 47 \n\t" -- -- "xvadddp 32, 32, 48 \n\t" -- "xvadddp 33, 33, 49 \n\t" -- "xvadddp 34, 34, 50 \n\t" -- "xvadddp 35, 35, 51 \n\t" -- "xvadddp 36, 36, 52 \n\t" -- "xvadddp 37, 37, 53 \n\t" -- "xvadddp 38, 38, 54 \n\t" -- "xvadddp 39, 39, 55 \n\t" -- -- "xvadddp 32, 32, 33 \n\t" -- "xvadddp 34, 34, 35 \n\t" -- "xvadddp 36, 36, 37 \n\t" -- "xvadddp 38, 38, 39 \n\t" -- -- "xvadddp 32, 32, 34 \n\t" -- "xvadddp 36, 36, 38 \n\t" -- -- "xvadddp 32, 32, 36 \n\t" -- -- -- "stxvd2x 32, 0, %3 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (n), // 1 -- "r" (x1), // 2 -- "r" (svec), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2", "memory" -- ); -- --} -+ double sum; -+ __vector double t0; -+ __vector double t1; -+ __vector double t2; -+ __vector double t3; -+ -+ __asm__ -+ ( -+ "dcbt 0, %2 \n\t" -+ -+ "xxlxor 32, 32, 32 \n\t" -+ "xxlxor 33, 33, 33 \n\t" -+ "xxlxor 34, 34, 34 \n\t" -+ "xxlxor 35, 35, 35 \n\t" -+ "xxlxor 36, 36, 36 \n\t" -+ "xxlxor 37, 37, 37 \n\t" -+ "xxlxor 38, 38, 38 \n\t" -+ "xxlxor 39, 39, 39 \n\t" -+ -+ "lxvd2x 40, 0, %2 \n\t" -+ "lxvd2x 41, %8, %2 \n\t" -+ "lxvd2x 42, %9, %2 \n\t" -+ "lxvd2x 43, %10, %2 \n\t" -+ "lxvd2x 44, %11, %2 \n\t" -+ "lxvd2x 45, %12, %2 \n\t" -+ "lxvd2x 46, %13, %2 \n\t" -+ "lxvd2x 47, %14, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. %1, %1, -16 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "xvabsdp 48, 40 \n\t" -+ "xvabsdp 49, 41 \n\t" -+ "xvabsdp 50, 42 \n\t" -+ "xvabsdp 51, 43 \n\t" -+ -+ "lxvd2x 40, 0, %2 \n\t" -+ "lxvd2x 41, %8, %2 \n\t" -+ -+ "xvabsdp %x3, 44 \n\t" -+ "xvabsdp %x4, 45 \n\t" -+ -+ "lxvd2x 42, %9, %2 \n\t" -+ "lxvd2x 43, %10, %2 \n\t" -+ -+ "xvabsdp %x5, 46 \n\t" -+ "xvabsdp %x6, 47 \n\t" -+ -+ "lxvd2x 44, %11, %2 \n\t" -+ "lxvd2x 45, %12, %2 \n\t" -+ -+ "xvadddp 32, 32, 48 \n\t" -+ "xvadddp 33, 33, 49 \n\t" -+ -+ "lxvd2x 46, %13, %2 \n\t" -+ "lxvd2x 47, %14, %2 \n\t" -+ -+ "xvadddp 34, 34, 50 \n\t" -+ "xvadddp 35, 35, 51 \n\t" -+ "addi %2, %2, 128 \n\t" -+ "xvadddp 36, 36, %x3 \n\t" -+ "xvadddp 37, 37, %x4 \n\t" -+ "addic. 
%1, %1, -16 \n\t" -+ "xvadddp 38, 38, %x5 \n\t" -+ "xvadddp 39, 39, %x6 \n\t" -+ -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvabsdp 48, 40 \n\t" -+ "xvabsdp 49, 41 \n\t" -+ "xvabsdp 50, 42 \n\t" -+ "xvabsdp 51, 43 \n\t" -+ "xvabsdp %x3, 44 \n\t" -+ "xvabsdp %x4, 45 \n\t" -+ "xvabsdp %x5, 46 \n\t" -+ "xvabsdp %x6, 47 \n\t" -+ -+ "xvadddp 32, 32, 48 \n\t" -+ "xvadddp 33, 33, 49 \n\t" -+ "xvadddp 34, 34, 50 \n\t" -+ "xvadddp 35, 35, 51 \n\t" -+ "xvadddp 36, 36, %x3 \n\t" -+ "xvadddp 37, 37, %x4 \n\t" -+ "xvadddp 38, 38, %x5 \n\t" -+ "xvadddp 39, 39, %x6 \n\t" -+ -+ "xvadddp 32, 32, 33 \n\t" -+ "xvadddp 34, 34, 35 \n\t" -+ "xvadddp 36, 36, 37 \n\t" -+ "xvadddp 38, 38, 39 \n\t" -+ -+ "xvadddp 32, 32, 34 \n\t" -+ "xvadddp 36, 36, 38 \n\t" -+ -+ "xvadddp 32, 32, 36 \n\t" -+ -+ "xxswapd 33, 32 \n\t" -+ "xsadddp %x0, 32, 33 \n" -+ -+ "#n=%1 x=%3=%2 sum=%0 o16=%8 o32=%9 o48=%10 o64=%11 o80=%12 o96=%13 o112=%14\n" -+ "#t0=%x3 t1=%x4 t2=%x5 t3=%x6" -+ : -+ "=d" (sum), // 0 -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "=wa" (t0), // 3 -+ "=wa" (t1), // 4 -+ "=wa" (t2), // 5 -+ "=wa" (t3) // 6 -+ : -+ "m" (*x), -+ "b" (16), // 8 -+ "b" (32), // 9 -+ "b" (48), // 10 -+ "b" (64), // 11 -+ "b" (80), // 12 -+ "b" (96), // 13 -+ "b" (112) // 14 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51" -+ ); -+ -+ return sum; -+} - - -diff --git a/kernel/power/daxpy.c b/kernel/power/daxpy.c -index 4365bd8..df0572e 100644 ---- a/kernel/power/daxpy.c -+++ b/kernel/power/daxpy.c -@@ -43,21 +43,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #ifndef HAVE_KERNEL_8 - --static void daxpy_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) -+static void daxpy_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT alpha) - { - BLASLONG register i = 0; -- FLOAT a = *alpha; - - while(i < n) - { -- y[i] += a * x[i]; -- y[i+1] += a * x[i+1]; -- y[i+2] += a * x[i+2]; -- y[i+3] += a * x[i+3]; -- y[i+4] += a * x[i+4]; -- y[i+5] += a * x[i+5]; -- y[i+6] += a * x[i+6]; -- y[i+7] += a * x[i+7]; -+ y[i] += alpha * x[i]; -+ y[i+1] += alpha * x[i+1]; -+ y[i+2] += alpha * x[i+2]; -+ y[i+3] += alpha * x[i+3]; -+ y[i+4] += alpha * x[i+4]; -+ y[i+5] += alpha * x[i+5]; -+ y[i+6] += alpha * x[i+6]; -+ y[i+7] += alpha * x[i+7]; - i+=8 ; - - } -@@ -70,11 +69,6 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS - { - BLASLONG i=0; - BLASLONG ix=0,iy=0; -- FLOAT a2[4]; -- a2[0]=da; -- a2[1]=da; -- a2[2]=da; -- a2[3]=da; - - if ( n <= 0 ) return(0); - -@@ -84,7 +78,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS - BLASLONG n1 = n & -16; - - if ( n1 ) -- daxpy_kernel_8(n1, x, y , a2 ); -+ daxpy_kernel_8(n1, x, y, da); - - i = n1; - while(i < n) -diff --git a/kernel/power/daxpy_microk_power8.c b/kernel/power/daxpy_microk_power8.c -index bb3f73a..fb714a3 100644 ---- a/kernel/power/daxpy_microk_power8.c -+++ b/kernel/power/daxpy_microk_power8.c -@@ -35,167 +35,183 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - - #define HAVE_KERNEL_8 1 --static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *alpha) __attribute__ ((noinline)); - --static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) -+static void daxpy_kernel_8 (long n, double *x, double *y, double alpha) - { -+ __vector double t0; -+ __vector double t1; -+ __vector double t2; -+ __vector double t3; -+ __vector double t4; -+ __vector double t5; -+ __vector double t6; -+ __vector double t7; -+ __vector double t8; -+ __vector double t9; -+ __vector double t10; -+ __vector double t11; -+ __vector double t12; -+ __vector double t13; -+ __vector double t14; -+ __vector double t15; -+ __vector double t16; - -+ __asm__ -+ ( -+ "xxspltd %x4, %x22, 0 \n\t" - -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- FLOAT *x1=x; -- FLOAT *y1=y; -- FLOAT *y2=y+1; -- BLASLONG pre = 384; -+ "dcbt 0, %2 \n\t" -+ "dcbt 0, %3 \n\t" - -- __asm__ __volatile__ -- ( -+ "lxvd2x %x5, 0, %2 \n\t" -+ "lxvd2x %x6, %23, %2 \n\t" -+ "lxvd2x %x7, %24, %2 \n\t" -+ "lxvd2x %x8, %25, %2 \n\t" - -- "lxsdx 33, %5, %4 \n\t" -- "xxspltd 32, 33, 0 \n\t" -- "addi %8, %8, -8 \n\t" -+ "lxvd2x %x13, 0, %3 \n\t" -+ "lxvd2x %x14, %23, %3 \n\t" -+ "lxvd2x %x15, %24, %3 \n\t" -+ "lxvd2x %x16, %25, %3 \n\t" - -- "dcbt %2, %9 \n\t" -- "dcbt %3, %9 \n\t" -+ "addi %2, %2, 64 \n\t" -+ "addi %3, %3, 64 \n\t" - -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -+ "lxvd2x %x9, 0, %2 \n\t" -+ "lxvd2x %x10, %23, %2 \n\t" -+ "lxvd2x %x11, %24, %2 \n\t" -+ "lxvd2x %x12, %25, %2 \n\t" - -- "lxvd2x 48, 0, %3 \n\t" -- "lxvd2x 49, %5, %3 \n\t" -- "lxvd2x 50, %6, %3 \n\t" -- "lxvd2x 51, %7, %3 \n\t" -- -- "addi %2, %2, 64 \n\t" -- "addi %3, %3, 64 \n\t" -- -- "lxvd2x 44, 0, %2 \n\t" -- "lxvd2x 45, %5, %2 \n\t" -- "lxvd2x 46, %6, %2 \n\t" -- "lxvd2x 47, %7, %2 \n\t" -- -- "lxvd2x 52, 0, %3 \n\t" -- "lxvd2x 53, %5, %3 \n\t" -- "lxvd2x 54, %6, %3 \n\t" -- "lxvd2x 55, %7, %3 \n\t" -- -- "addi %2, %2, 64 \n\t" -- "addi %3, %3, 64 \n\t" -- -- "addic. %0 , %0 , -16 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2, %9 \n\t" -- "dcbt %3, %9 \n\t" -- -- "xvmaddadp 48, 40, 32 \n\t" -- "xvmaddadp 49, 41, 32 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- -- "stxvd2x 48, 0, %8 \n\t" -- "stxvd2x 49, %5, %8 \n\t" -- -- "xvmaddadp 50, 42, 32 \n\t" -- "xvmaddadp 51, 43, 32 \n\t" -- -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- -- "stxvd2x 50, %6, %8 \n\t" -- "stxvd2x 51, %7, %8 \n\t" -- -- "lxvd2x 48, 0, %3 \n\t" -- "lxvd2x 49, %5, %3 \n\t" -- "lxvd2x 50, %6, %3 \n\t" -- "lxvd2x 51, %7, %3 \n\t" -- -- "addi %2, %2, 64 \n\t" -- "addi %8, %8, 64 \n\t" -- -- "xvmaddadp 52, 44, 32 \n\t" -- "addi %3, %3, 64 \n\t" -- "xvmaddadp 53, 45, 32 \n\t" -- -- "lxvd2x 44, 0, %2 \n\t" -- "lxvd2x 45, %5, %2 \n\t" -- -- "stxvd2x 52, 0, %8 \n\t" -- "stxvd2x 53, %5, %8 \n\t" -- -- "xvmaddadp 54, 46, 32 \n\t" -- "xvmaddadp 55, 47, 32 \n\t" -- -- "lxvd2x 46, %6, %2 \n\t" -- "lxvd2x 47, %7, %2 \n\t" -- -- "stxvd2x 54, %6, %8 \n\t" -- "stxvd2x 55, %7, %8 \n\t" -- -- "addi %2, %2, 64 \n\t" -- "addi %8, %8, 64 \n\t" -- -- "lxvd2x 52, 0, %3 \n\t" -- "lxvd2x 53, %5, %3 \n\t" -- "lxvd2x 54, %6, %3 \n\t" -- "lxvd2x 55, %7, %3 \n\t" -- -- "addi %3, %3, 64 \n\t" -- -- -- "addic. 
%0 , %0 , -16 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- -- "xvmaddadp 48, 40, 32 \n\t" -- "xvmaddadp 49, 41, 32 \n\t" -- "xvmaddadp 50, 42, 32 \n\t" -- "xvmaddadp 51, 43, 32 \n\t" -- -- "xvmaddadp 52, 44, 32 \n\t" -- "xvmaddadp 53, 45, 32 \n\t" -- "xvmaddadp 54, 46, 32 \n\t" -- "xvmaddadp 55, 47, 32 \n\t" -- -- "stxvd2x 48, 0, %8 \n\t" -- "stxvd2x 49, %5, %8 \n\t" -- "stxvd2x 50, %6, %8 \n\t" -- "stxvd2x 51, %7, %8 \n\t" -- -- "addi %8, %8, 64 \n\t" -- -- "stxvd2x 52, 0, %8 \n\t" -- "stxvd2x 53, %5, %8 \n\t" -- "stxvd2x 54, %6, %8 \n\t" -- "stxvd2x 55, %7, %8 \n\t" -- -- "addi %8, %8, 64 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (n), // 1 -- "r" (x1), // 2 -- "r" (y1), // 3 -- "r" (alpha), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (y2), // 8 -- "r" (pre) // 9 -- : "cr0", "%0", "%2" , "%3", "%8", "memory" -- ); -- --} -+ "lxvd2x %x17, 0, %3 \n\t" -+ "lxvd2x %x18, %23, %3 \n\t" -+ "lxvd2x %x19, %24, %3 \n\t" -+ "lxvd2x %x20, %25, %3 \n\t" -+ -+ "addi %2, %2, 64 \n\t" -+ "addi %3, %3, -64 \n\t" -+ -+ "addic. %1, %1, -16 \n\t" -+ "ble 2f \n\t" -+ -+ ".align 5 \n" -+ "1: \n\t" -+ -+ "xvmaddadp %x13, %x5, %x4 \n\t" -+ "xvmaddadp %x14, %x6, %x4 \n\t" -+ -+ "lxvd2x %x5, 0, %2 \n\t" -+ "lxvd2x %x6, %23, %2 \n\t" -+ -+ "stxvd2x %x13, 0, %3 \n\t" -+ "stxvd2x %x14, %23, %3 \n\t" -+ -+ "xvmaddadp %x15, %x7, %x4 \n\t" -+ "xvmaddadp %x16, %x8, %x4 \n\t" -+ -+ "lxvd2x %x7, %24, %2 \n\t" -+ "lxvd2x %x8, %25, %2 \n\t" -+ -+ "stxvd2x %x15, %24, %3 \n\t" -+ "stxvd2x %x16, %25, %3 \n\t" -+ -+ "addi %2, %2, 64 \n\t" -+ "addi %3, %3, 128 \n\t" -+ -+ "lxvd2x %x13, 0, %3 \n\t" -+ "lxvd2x %x14, %23, %3 \n\t" -+ "lxvd2x %x15, %24, %3 \n\t" -+ "lxvd2x %x16, %25, %3 \n\t" -+ -+ "addi %3, %3, -64 \n\t" -+ -+ "xvmaddadp %x17, %x9, %x4 \n\t" -+ "xvmaddadp %x18, %x10, %x4 \n\t" -+ -+ "lxvd2x %x9, 0, %2 \n\t" -+ "lxvd2x %x10, %23, %2 \n\t" -+ -+ "stxvd2x %x17, 0, %3 \n\t" -+ "stxvd2x %x18, %23, %3 \n\t" -+ -+ "xvmaddadp %x19, %x11, %x4 \n\t" -+ "xvmaddadp %x20, %x12, %x4 \n\t" -+ -+ "lxvd2x %x11, %24, %2 \n\t" -+ "lxvd2x %x12, %25, %2 \n\t" -+ -+ "stxvd2x %x19, %24, %3 \n\t" -+ "stxvd2x %x20, %25, %3 \n\t" -+ -+ "addi %2, %2, 64 \n\t" -+ "addi %3, %3, 128 \n\t" -+ -+ "lxvd2x %x17, 0, %3 \n\t" -+ "lxvd2x %x18, %23, %3 \n\t" -+ "lxvd2x %x19, %24, %3 \n\t" -+ "lxvd2x %x20, %25, %3 \n\t" -+ -+ "addi %3, %3, -64 \n\t" -+ -+ "addic. 
%1, %1, -16 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvmaddadp %x13, %x5, %x4 \n\t" -+ "xvmaddadp %x14, %x6, %x4 \n\t" -+ "xvmaddadp %x15, %x7, %x4 \n\t" -+ "xvmaddadp %x16, %x8, %x4 \n\t" -+ -+ "xvmaddadp %x17, %x9, %x4 \n\t" -+ "xvmaddadp %x18, %x10, %x4 \n\t" -+ "xvmaddadp %x19, %x11, %x4 \n\t" -+ "xvmaddadp %x20, %x12, %x4 \n\t" -+ -+ "stxvd2x %x13, 0, %3 \n\t" -+ "stxvd2x %x14, %23, %3 \n\t" -+ "stxvd2x %x15, %24, %3 \n\t" -+ "stxvd2x %x16, %25, %3 \n\t" -+ -+ "addi %3, %3, 64 \n\t" -+ -+ "stxvd2x %x17, 0, %3 \n\t" -+ "stxvd2x %x18, %23, %3 \n\t" -+ "stxvd2x %x19, %24, %3 \n\t" -+ "stxvd2x %x20, %25, %3 \n" -+ -+ "#n=%1 x=%21=%2 y=%0=%3 alpha=%22 o16=%23 o32=%24 o48=%25\n" -+ "#t0=%x4 t1=%x5 t2=%x6 t3=%x7 t4=%x8 t5=%x9 t6=%x10 t7=%x11 t8=%x12 t9=%x13 t10=%x14 t11=%x15 t12=%x16 t13=%x17 t14=%x18 t15=%x19 t16=%x20" -+ : -+ "+m" (*y), -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "+b" (y), // 3 -+ "=wa" (t0), // 4 -+ "=wa" (t1), // 5 -+ "=wa" (t2), // 6 -+ "=wa" (t3), // 7 -+ "=wa" (t4), // 8 -+ "=wa" (t5), // 9 -+ "=wa" (t6), // 10 -+ "=wa" (t7), // 11 -+ "=wa" (t8), // 12 -+ "=wa" (t9), // 13 -+ "=wa" (t10), // 14 -+ "=wa" (t11), // 15 -+ "=wa" (t12), // 16 -+ "=wa" (t13), // 17 -+ "=wa" (t14), // 18 -+ "=wa" (t15), // 19 -+ "=wa" (t16) // 20 -+ : -+ "m" (*x), -+ "d" (alpha), // 22 -+ "b" (16), // 23 -+ "b" (32), // 24 -+ "b" (48) // 25 -+ : -+ "cr0" -+ ); -+ -+} - - -diff --git a/kernel/power/dcopy_microk_power8.c b/kernel/power/dcopy_microk_power8.c -index 04f7db5..261dc04 100644 ---- a/kernel/power/dcopy_microk_power8.c -+++ b/kernel/power/dcopy_microk_power8.c -@@ -35,140 +35,121 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #define HAVE_KERNEL_32 1 - --static void dcopy_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) __attribute__ ((noinline)); -- --static void dcopy_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) -+static void dcopy_kernel_32 (long n, double *x, double *y) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *y1=y; -- BLASLONG pre = 384; -- BLASLONG alpha=0; -- -- __asm__ __volatile__ -- ( -- -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "lxvd2x 50, 0, %2 \n\t" -- "lxvd2x 51, %5, %2 \n\t" -- "lxvd2x 52, %6, %2 \n\t" -- "lxvd2x 53, %7, %2 \n\t" -- "lxvd2x 54, %8, %2 \n\t" -- "lxvd2x 55, %9, %2 \n\t" -- "lxvd2x 56, %10, %2 \n\t" -- "lxvd2x 57, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "addic. 
%0 , %0 , -32 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "stxvd2x 40, 0, %1 \n\t" -- "stxvd2x 41, %5, %1 \n\t" -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "stxvd2x 42, %6, %1 \n\t" -- "stxvd2x 43, %7, %1 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "stxvd2x 44, %8, %1 \n\t" -- "stxvd2x 45, %9, %1 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "stxvd2x 46, %10, %1 \n\t" -- "stxvd2x 47, %11, %1 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- -- "addi %1, %1, 128 \n\t" -- "addi %2, %2, 128 \n\t" -- -- "stxvd2x 50, 0, %1 \n\t" -- "stxvd2x 51, %5, %1 \n\t" -- "lxvd2x 50, 0, %2 \n\t" -- "lxvd2x 51, %5, %2 \n\t" -- "stxvd2x 52, %6, %1 \n\t" -- "stxvd2x 53, %7, %1 \n\t" -- "lxvd2x 52, %6, %2 \n\t" -- "lxvd2x 53, %7, %2 \n\t" -- "stxvd2x 54, %8, %1 \n\t" -- "stxvd2x 55, %9, %1 \n\t" -- "lxvd2x 54, %8, %2 \n\t" -- "lxvd2x 55, %9, %2 \n\t" -- "stxvd2x 56, %10, %1 \n\t" -- "stxvd2x 57, %11, %1 \n\t" -- "lxvd2x 56, %10, %2 \n\t" -- "lxvd2x 57, %11, %2 \n\t" -- -- "addi %1, %1, 128 \n\t" -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -32 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "stxvd2x 40, 0, %1 \n\t" -- "stxvd2x 41, %5, %1 \n\t" -- "stxvd2x 42, %6, %1 \n\t" -- "stxvd2x 43, %7, %1 \n\t" -- "stxvd2x 44, %8, %1 \n\t" -- "stxvd2x 45, %9, %1 \n\t" -- "stxvd2x 46, %10, %1 \n\t" -- "stxvd2x 47, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "stxvd2x 50, 0, %1 \n\t" -- "stxvd2x 51, %5, %1 \n\t" -- "stxvd2x 52, %6, %1 \n\t" -- "stxvd2x 53, %7, %1 \n\t" -- "stxvd2x 54, %8, %1 \n\t" -- "stxvd2x 55, %9, %1 \n\t" -- "stxvd2x 56, %10, %1 \n\t" -- "stxvd2x 57, %11, %1 \n\t" -- -- -- : -- : -- "r" (i), // 0 -- "r" (y1), // 1 -- "r" (x1), // 2 -- "r" (alpha), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "memory" -- ); -- --} -- -- -+ __asm__ -+ ( -+ "lxvd2x 32, 0, %2 \n\t" -+ "lxvd2x 33, %5, %2 \n\t" -+ "lxvd2x 34, %6, %2 \n\t" -+ "lxvd2x 35, %7, %2 \n\t" -+ "lxvd2x 36, %8, %2 \n\t" -+ "lxvd2x 37, %9, %2 \n\t" -+ "lxvd2x 38, %10, %2 \n\t" -+ "lxvd2x 39, %11, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "lxvd2x 40, 0, %2 \n\t" -+ "lxvd2x 41, %5, %2 \n\t" -+ "lxvd2x 42, %6, %2 \n\t" -+ "lxvd2x 43, %7, %2 \n\t" -+ "lxvd2x 44, %8, %2 \n\t" -+ "lxvd2x 45, %9, %2 \n\t" -+ "lxvd2x 46, %10, %2 \n\t" -+ "lxvd2x 47, %11, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. 
%1, %1, -32 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "stxvd2x 32, 0, %3 \n\t" -+ "stxvd2x 33, %5, %3 \n\t" -+ "lxvd2x 32, 0, %2 \n\t" -+ "lxvd2x 33, %5, %2 \n\t" -+ "stxvd2x 34, %6, %3 \n\t" -+ "stxvd2x 35, %7, %3 \n\t" -+ "lxvd2x 34, %6, %2 \n\t" -+ "lxvd2x 35, %7, %2 \n\t" -+ "stxvd2x 36, %8, %3 \n\t" -+ "stxvd2x 37, %9, %3 \n\t" -+ "lxvd2x 36, %8, %2 \n\t" -+ "lxvd2x 37, %9, %2 \n\t" -+ "stxvd2x 38, %10, %3 \n\t" -+ "stxvd2x 39, %11, %3 \n\t" -+ "lxvd2x 38, %10, %2 \n\t" -+ "lxvd2x 39, %11, %2 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ "addi %2, %2, 128 \n\t" -+ -+ "stxvd2x 40, 0, %3 \n\t" -+ "stxvd2x 41, %5, %3 \n\t" -+ "lxvd2x 40, 0, %2 \n\t" -+ "lxvd2x 41, %5, %2 \n\t" -+ "stxvd2x 42, %6, %3 \n\t" -+ "stxvd2x 43, %7, %3 \n\t" -+ "lxvd2x 42, %6, %2 \n\t" -+ "lxvd2x 43, %7, %2 \n\t" -+ "stxvd2x 44, %8, %3 \n\t" -+ "stxvd2x 45, %9, %3 \n\t" -+ "lxvd2x 44, %8, %2 \n\t" -+ "lxvd2x 45, %9, %2 \n\t" -+ "stxvd2x 46, %10, %3 \n\t" -+ "stxvd2x 47, %11, %3 \n\t" -+ "lxvd2x 46, %10, %2 \n\t" -+ "lxvd2x 47, %11, %2 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. %1, %1, -32 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "stxvd2x 32, 0, %3 \n\t" -+ "stxvd2x 33, %5, %3 \n\t" -+ "stxvd2x 34, %6, %3 \n\t" -+ "stxvd2x 35, %7, %3 \n\t" -+ "stxvd2x 36, %8, %3 \n\t" -+ "stxvd2x 37, %9, %3 \n\t" -+ "stxvd2x 38, %10, %3 \n\t" -+ "stxvd2x 39, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "stxvd2x 40, 0, %3 \n\t" -+ "stxvd2x 41, %5, %3 \n\t" -+ "stxvd2x 42, %6, %3 \n\t" -+ "stxvd2x 43, %7, %3 \n\t" -+ "stxvd2x 44, %8, %3 \n\t" -+ "stxvd2x 45, %9, %3 \n\t" -+ "stxvd2x 46, %10, %3 \n\t" -+ "stxvd2x 47, %11, %3 \n" -+ -+ "#n=%1 x=%4=%2 y=%0=%3 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" -+ : -+ "=m" (*y), -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "+b" (y) // 3 -+ : -+ "m" (*x), -+ "b" (16), // 5 -+ "b" (32), // 6 -+ "b" (48), // 7 -+ "b" (64), // 8 -+ "b" (80), // 9 -+ "b" (96), // 10 -+ "b" (112) // 11 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47" -+ ); -+} -diff --git a/kernel/power/ddot.c b/kernel/power/ddot.c -index cef60a2..e43470e 100644 ---- a/kernel/power/ddot.c -+++ b/kernel/power/ddot.c -@@ -43,7 +43,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #ifndef HAVE_KERNEL_8 - --static void ddot_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *d) -+static FLOAT ddot_kernel_8 (BLASLONG n, FLOAT *x, FLOAT *y) - { - BLASLONG register i = 0; - FLOAT dot = 0.0; -@@ -62,8 +62,7 @@ static void ddot_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *d) - i+=8 ; - - } -- *d += dot; -- -+ return dot; - } - - #endif -@@ -83,7 +82,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) - BLASLONG n1 = n & -16; - - if ( n1 ) -- ddot_kernel_8(n1, x, y , &dot ); -+ dot = ddot_kernel_8(n1, x, y); - - i = n1; - while(i < n) -diff --git a/kernel/power/ddot_microk_power8.c b/kernel/power/ddot_microk_power8.c -index b880492..4e6bc29 100644 ---- a/kernel/power/ddot_microk_power8.c -+++ b/kernel/power/ddot_microk_power8.c -@@ -34,145 +34,138 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- **************************************************************************************/ - - #define HAVE_KERNEL_8 1 --static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *dot) __attribute__ ((noinline)); - --static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) -+static double ddot_kernel_8 (long n, double *x, double *y) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *y1=y; -- BLASLONG pre = 384; -- -- __asm__ __volatile__ -- ( -- "xxlxor 32,32,32 \n\t" -- "xxlxor 33,33,33 \n\t" -- "xxlxor 34,34,34 \n\t" -- "xxlxor 35,35,35 \n\t" -- "xxlxor 36,36,36 \n\t" -- "xxlxor 37,37,37 \n\t" -- "xxlxor 38,38,38 \n\t" -- "xxlxor 39,39,39 \n\t" -- -- "dcbt %2, %12 \n\t" -- "dcbt %3, %12 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 48, 0, %3 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 49, %5, %3 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 50, %6, %3 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "lxvd2x 51, %7, %3 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 52, %8, %3 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "lxvd2x 53, %9, %3 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 54, %10, %3 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- "lxvd2x 55, %11, %3 \n\t" -- -- "addi %2, %2, 128 \n\t" -- "addi %3, %3, 128 \n\t" -- -- "addic. %0 , %0 , -16 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2, %12 \n\t" -- "dcbt %3, %12 \n\t" -- -- "xvmaddadp 32, 40, 48 \n\t" -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 48, 0, %3 \n\t" -- "xvmaddadp 33, 41, 49 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 49, %5, %3 \n\t" -- "xvmaddadp 34, 42, 50 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 50, %6, %3 \n\t" -- "xvmaddadp 35, 43, 51 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "lxvd2x 51, %7, %3 \n\t" -- "xvmaddadp 36, 44, 52 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 52, %8, %3 \n\t" -- "xvmaddadp 37, 45, 53 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "lxvd2x 53, %9, %3 \n\t" -- "xvmaddadp 38, 46, 54 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 54, %10, %3 \n\t" -- "xvmaddadp 39, 47, 55 \n\t" -- -- "lxvd2x 47, %11, %2 \n\t" -- "lxvd2x 55, %11, %3 \n\t" -- -- -- "addi %2, %2, 128 \n\t" -- "addi %3, %3, 128 \n\t" -- -- "addic. 
%0 , %0 , -16 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "xvmaddadp 32, 40, 48 \n\t" -- "xvmaddadp 33, 41, 49 \n\t" -- "xvmaddadp 34, 42, 50 \n\t" -- "xvmaddadp 35, 43, 51 \n\t" -- "xvmaddadp 36, 44, 52 \n\t" -- "xvmaddadp 37, 45, 53 \n\t" -- "xvmaddadp 38, 46, 54 \n\t" -- "xvmaddadp 39, 47, 55 \n\t" -- -- "xvadddp 32, 32, 33 \n\t" -- "xvadddp 34, 34, 35 \n\t" -- "xvadddp 36, 36, 37 \n\t" -- "xvadddp 38, 38, 39 \n\t" -- -- "xvadddp 32, 32, 34 \n\t" -- "xvadddp 36, 36, 38 \n\t" -- -- "xvadddp 32, 32, 36 \n\t" -- -- "xxswapd 33, 32 \n\t" -- -- "xsadddp 32, 32, 33 \n\t" -- -- "stxsdx 32, 0, %4 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (n), // 1 -- "r" (x1), // 2 -- "r" (y1), // 3 -- "r" (dot), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112), // 11 -- "r" (pre) // 12 -- : "cr0", "%0", "%2" , "%3", "memory" -- ); -- --} -- -- -+ double dot; -+ __vector double t0; -+ __vector double t1; -+ __vector double t2; -+ __vector double t3; -+ -+ __asm__ -+ ( -+ "dcbt 0, %2 \n\t" -+ "dcbt 0, %3 \n\t" -+ -+ "xxlxor 32, 32, 32 \n\t" -+ "xxlxor 33, 33, 33 \n\t" -+ "xxlxor 34, 34, 34 \n\t" -+ "xxlxor 35, 35, 35 \n\t" -+ "xxlxor 36, 36, 36 \n\t" -+ "xxlxor 37, 37, 37 \n\t" -+ "xxlxor 38, 38, 38 \n\t" -+ "xxlxor 39, 39, 39 \n\t" -+ -+ "lxvd2x 40, 0, %2 \n\t" -+ "lxvd2x 48, 0, %3 \n\t" -+ "lxvd2x 41, %10, %2 \n\t" -+ "lxvd2x 49, %10, %3 \n\t" -+ "lxvd2x 42, %11, %2 \n\t" -+ "lxvd2x 50, %11, %3 \n\t" -+ "lxvd2x 43, %12, %2 \n\t" -+ "lxvd2x 51, %12, %3 \n\t" -+ "lxvd2x 44, %13, %2 \n\t" -+ "lxvd2x %x4, %13, %3 \n\t" -+ "lxvd2x 45, %14, %2 \n\t" -+ "lxvd2x %x5, %14, %3 \n\t" -+ "lxvd2x 46, %15, %2 \n\t" -+ "lxvd2x %x6, %15, %3 \n\t" -+ "lxvd2x 47, %16, %2 \n\t" -+ "lxvd2x %x7, %16, %3 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ "addi %3, %3, 128 \n\t" -+ -+ "addic. %1, %1, -16 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "xvmaddadp 32, 40, 48 \n\t" -+ "lxvd2x 40, 0, %2 \n\t" -+ "lxvd2x 48, 0, %3 \n\t" -+ "xvmaddadp 33, 41, 49 \n\t" -+ "lxvd2x 41, %10, %2 \n\t" -+ "lxvd2x 49, %10, %3 \n\t" -+ "xvmaddadp 34, 42, 50 \n\t" -+ "lxvd2x 42, %11, %2 \n\t" -+ "lxvd2x 50, %11, %3 \n\t" -+ "xvmaddadp 35, 43, 51 \n\t" -+ "lxvd2x 43, %12, %2 \n\t" -+ "lxvd2x 51, %12, %3 \n\t" -+ "xvmaddadp 36, 44, %x4 \n\t" -+ "lxvd2x 44, %13, %2 \n\t" -+ "lxvd2x %x4, %13, %3 \n\t" -+ "xvmaddadp 37, 45, %x5 \n\t" -+ "lxvd2x 45, %14, %2 \n\t" -+ "lxvd2x %x5, %14, %3 \n\t" -+ "xvmaddadp 38, 46, %x6 \n\t" -+ "lxvd2x 46, %15, %2 \n\t" -+ "lxvd2x %x6, %15, %3 \n\t" -+ "xvmaddadp 39, 47, %x7 \n\t" -+ "lxvd2x 47, %16, %2 \n\t" -+ "lxvd2x %x7, %16, %3 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ "addi %3, %3, 128 \n\t" -+ -+ "addic. 
%1, %1, -16 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvmaddadp 32, 40, 48 \n\t" -+ "xvmaddadp 33, 41, 49 \n\t" -+ "xvmaddadp 34, 42, 50 \n\t" -+ "xvmaddadp 35, 43, 51 \n\t" -+ "xvmaddadp 36, 44, %x4 \n\t" -+ "xvmaddadp 37, 45, %x5 \n\t" -+ "xvmaddadp 38, 46, %x6 \n\t" -+ "xvmaddadp 39, 47, %x7 \n\t" -+ -+ "xvadddp 32, 32, 33 \n\t" -+ "xvadddp 34, 34, 35 \n\t" -+ "xvadddp 36, 36, 37 \n\t" -+ "xvadddp 38, 38, 39 \n\t" -+ -+ "xvadddp 32, 32, 34 \n\t" -+ "xvadddp 36, 36, 38 \n\t" -+ -+ "xvadddp 32, 32, 36 \n\t" -+ -+ "xxswapd 33, 32 \n\t" -+ -+ "xsadddp %x0, 32, 33 \n" -+ -+ "#dot=%0 n=%1 x=%8=%2 y=%9=%3 o16=%10 o32=%11 o48=%12 o64=%13 o80=%14 o96=%15 o122=%16\n" -+ "#t0=%x4 t1=%x5 t2=%x6 t3=%x7" -+ : -+ "=d" (dot), // 0 -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "+b" (y), // 3 -+ "=wa" (t0), // 4 -+ "=wa" (t1), // 5 -+ "=wa" (t2), // 6 -+ "=wa" (t3) // 7 -+ : -+ "m" (*x), -+ "m" (*y), -+ "b" (16), // 10 -+ "b" (32), // 11 -+ "b" (48), // 12 -+ "b" (64), // 13 -+ "b" (80), // 14 -+ "b" (96), // 15 -+ "b" (112) // 16 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51" -+ ); -+ -+ return dot; -+} -diff --git a/kernel/power/dgemv_n.c b/kernel/power/dgemv_n.c -index 812d09d..57f9f9e 100644 ---- a/kernel/power/dgemv_n.c -+++ b/kernel/power/dgemv_n.c -@@ -47,18 +47,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #ifndef HAVE_KERNEL_4x4 - --static void dgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT *alpha) -+static void dgemv_kernel_4x4(BLASLONG n, FLOAT *a_ptr, BLASLONG lda, FLOAT *xo, FLOAT *y, FLOAT alpha) - { - BLASLONG i; -- FLOAT *a0,*a1,*a2,*a3; - FLOAT x[4] __attribute__ ((aligned (16)));; -- a0 = ap[0]; -- a1 = ap[1]; -- a2 = ap[2]; -- a3 = ap[3]; -+ FLOAT *a0 = a_ptr; -+ FLOAT *a1 = a0 + lda; -+ FLOAT *a2 = a1 + lda; -+ FLOAT *a3 = a2 + lda; -+ - - for ( i=0; i<4; i++) -- x[i] = xo[i] * *alpha; -+ x[i] = xo[i] * alpha; - - for ( i=0; i< n; i+=4 ) - { -@@ -73,16 +73,13 @@ static void dgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT - - #ifndef HAVE_KERNEL_4x2 - --static void dgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT *alpha) -+static void dgemv_kernel_4x2(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *xo, FLOAT *y, FLOAT alpha) - { - BLASLONG i; -- FLOAT *a0,*a1; - FLOAT x[4] __attribute__ ((aligned (16)));; -- a0 = ap[0]; -- a1 = ap[1]; - - for ( i=0; i<2; i++) -- x[i] = xo[i] * *alpha; -+ x[i] = xo[i] * alpha; - - for ( i=0; i< n; i+=4 ) - { -@@ -98,15 +95,13 @@ static void dgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT - - #ifndef HAVE_KERNEL_4x1 - --static void dgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *xo, FLOAT *y, FLOAT *alpha) -+static void dgemv_kernel_4x1(BLASLONG n, FLOAT *a0, FLOAT *xo, FLOAT *y, FLOAT alpha) - { - BLASLONG i; -- FLOAT *a0; - FLOAT x[4] __attribute__ ((aligned (16)));; -- a0 = ap; - - for ( i=0; i<1; i++) -- x[i] = xo[i] * *alpha; -+ x[i] = xo[i] * alpha; - - for ( i=0; i< n; i+=4 ) - { -@@ -141,7 +136,6 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO - { - - BLASLONG i; -- BLASLONG j; - FLOAT *a_ptr; - FLOAT *x_ptr; - FLOAT *y_ptr; -@@ -151,13 +145,9 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO - BLASLONG m3; - BLASLONG n2; - BLASLONG lda4 = lda << 2; -- FLOAT *ap[4] __attribute__ ((aligned (16)));; - FLOAT xbuffer[8] __attribute__ ((aligned (16)));; -- FLOAT 
alpha_r[4] __attribute__ ((aligned (16)));; - FLOAT *ybuffer; - -- alpha_r[0] = alpha; -- - if ( m < 1 ) return(0); - if ( n < 1 ) return(0); - -@@ -187,11 +177,6 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO - a_ptr = a; - x_ptr = x; - -- ap[0] = a_ptr; -- ap[1] = a_ptr + lda; -- ap[2] = ap[1] + lda; -- ap[3] = ap[2] + lda; -- - if ( inc_y != 1 ) - memset(ybuffer,0,NB*8); - else -@@ -203,18 +188,14 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO - - for( i = 0; i < n1 ; i++) - { -- dgemv_kernel_4x4(NB,ap,x_ptr,ybuffer,alpha_r); -- ap[0] += lda4; -- ap[1] += lda4; -- ap[2] += lda4; -- ap[3] += lda4; -+ dgemv_kernel_4x4(NB,a_ptr,lda,x_ptr,ybuffer,alpha); - a_ptr += lda4; - x_ptr += 4; - } - - if ( n2 & 2 ) - { -- dgemv_kernel_4x2(NB,ap,x_ptr,ybuffer,alpha_r); -+ dgemv_kernel_4x2(NB,a_ptr,a_ptr+lda,x_ptr,ybuffer,alpha); - a_ptr += lda*2; - x_ptr += 2; - } -@@ -222,7 +203,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO - - if ( n2 & 1 ) - { -- dgemv_kernel_4x1(NB,a_ptr,x_ptr,ybuffer,alpha_r); -+ dgemv_kernel_4x1(NB,a_ptr,x_ptr,ybuffer,alpha); - a_ptr += lda; - x_ptr += 1; - -@@ -243,11 +224,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO - x_ptr += inc_x; - xbuffer[3] = x_ptr[0]; - x_ptr += inc_x; -- dgemv_kernel_4x4(NB,ap,xbuffer,ybuffer,alpha_r); -- ap[0] += lda4; -- ap[1] += lda4; -- ap[2] += lda4; -- ap[3] += lda4; -+ dgemv_kernel_4x4(NB,a_ptr,lda,xbuffer,ybuffer,alpha); - a_ptr += lda4; - } - -@@ -255,7 +232,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO - { - xbuffer[0] = x_ptr[0]; - x_ptr += inc_x; -- dgemv_kernel_4x1(NB,a_ptr,xbuffer,ybuffer,alpha_r); -+ dgemv_kernel_4x1(NB,a_ptr,xbuffer,ybuffer,alpha); - a_ptr += lda; - - } -diff --git a/kernel/power/dgemv_n_microk_power8.c b/kernel/power/dgemv_n_microk_power8.c -index 9eabe55..5b42bbb 100644 ---- a/kernel/power/dgemv_n_microk_power8.c -+++ b/kernel/power/dgemv_n_microk_power8.c -@@ -35,267 +35,264 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - #define HAVE_KERNEL_4x4 1 - --static void dgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT *alpha) __attribute__ ((noinline)); -- --static void dgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y, FLOAT *alpha) -+static void dgemv_kernel_4x4 (long n, double *ap, long lda, double *x, double *y, double alpha) - { -- BLASLONG i=n; -- BLASLONG o8 = 8; -- BLASLONG o16 = 16; -- BLASLONG o24 = 24; -- BLASLONG pre = 384; -- -- FLOAT *a0,*a1,*a2,*a3; -- FLOAT *y1=y+1; -- FLOAT x[4] __attribute__ ((aligned (16)));; -- a0 = ap[0]+1; -- a1 = ap[1]+1; -- a2 = ap[2]+1; -- a3 = ap[3]+1; -- -- x[0]=xo[0] * *alpha; -- x[1]=xo[1] * *alpha; -- x[2]=xo[2] * *alpha; -- x[3]=xo[3] * *alpha; -+ double *a0; -+ double *a1; -+ double *a2; -+ double *a3; -+ -+ __asm__ -+ ( -+ "lxvd2x 34, 0, %9 \n\t" // x0, x1 -+ "lxvd2x 35, %10, %9 \n\t" // x2, x3 -+ "xxspltd 32, %x8, 0 \n\t" // alpha, alpha -+ -+ "sldi %6, %4, 3 \n\t" // lda * sizeof (double) -+ -+ "xvmuldp 34, 34, 32 \n\t" // x0 * alpha, x1 * alpha -+ "xvmuldp 35, 35, 32 \n\t" // x2 * alpha, x3 * alpha -+ -+ "add %4, %3, %6 \n\t" // a1 = a0 + lda -+ "add %6, %6, %6 \n\t" // 2 * lda -+ -+ "xxspltd 32, 34, 0 \n\t" // x0 * alpha, x0 * alpha -+ "xxspltd 33, 34, 1 \n\t" // x1 * alpha, x1 * alpha -+ "xxspltd 34, 35, 0 \n\t" // x2 * alpha, x2 * alpha -+ "xxspltd 35, 35, 1 \n\t" // x3 * alpha, x3 * alpha -+ -+ "add %5, %3, %6 \n\t" // a2 = a0 + 2 * lda -+ "add %6, %4, %6 \n\t" // a3 = a1 + 2 * lda -+ -+ "dcbt 0, %3 \n\t" -+ "dcbt 0, %4 \n\t" -+ "dcbt 0, %5 \n\t" -+ "dcbt 0, %6 \n\t" - -+ "lxvd2x 40, 0, %3 \n\t" // a0[0], a0[1] -+ "lxvd2x 41, %10, %3 \n\t" // a0[2], a0[3] - -- __asm__ __volatile__ -- ( -- "lxvdsx 32, 0 , %1 \n\t" // x0 -- "lxvdsx 33,%3 , %1 \n\t" // x1 -- "lxvdsx 34,%4 , %1 \n\t" // x2 -- "lxvdsx 35,%5 , %1 \n\t" // x3 -- "addi %2 , %2 , -8 \n\t" -- "addi %6 , %6 , -8 \n\t" -- "addi %7 , %7 , -8 \n\t" -- "addi %8 , %8 , -8 \n\t" -- "addi %9 , %9 , -8 \n\t" -- -- "lxvd2x 48, 0, %6 \n\t" // a0[0], a0[1] -- "lxvd2x 49,%4, %6 \n\t" // a0[2], a0[3] -- -- "lxvd2x 50, 0, %7 \n\t" // a1[0], a1[1] -- "lxvd2x 51,%4, %7 \n\t" // a1[2], a1[3] -+ "lxvd2x 42, 0, %4 \n\t" // a1[0], a1[1] -+ "lxvd2x 43, %10, %4 \n\t" // a1[2], a1[3] - -- "lxvd2x 52, 0, %8 \n\t" // a2[0], a2[1] -- "lxvd2x 53,%4, %8 \n\t" // a2[2], a2[3] -+ "lxvd2x 44, 0, %5 \n\t" // a2[0], a2[1] -+ "lxvd2x 45, %10, %5 \n\t" // a2[2], a2[3] - -- "lxvd2x 54, 0, %9 \n\t" // a3[0], a3[1] -- "lxvd2x 55,%4, %9 \n\t" // a3[2], a3[3] -+ "lxvd2x 46, 0, %6 \n\t" // a3[0], a3[1] -+ "lxvd2x 47, %10, %6 \n\t" // a3[2], a3[3] - -- "addi %6, %6, 32 \n\t" -- "addi %7, %7, 32 \n\t" -- "addi %8, %8, 32 \n\t" -- "addi %9, %9, 32 \n\t" -+ "dcbt 0, %2 \n\t" - -- "addic. %0 , %0 , -4 \n\t" -- "ble 2f \n\t" -+ "addi %3, %3, 32 \n\t" -+ "addi %4, %4, 32 \n\t" -+ "addi %5, %5, 32 \n\t" -+ "addi %6, %6, 32 \n\t" - -- ".align 5 \n\t" -- "1: \n\t" -+ "addic. 
%1, %1, -4 \n\t" -+ "ble 2f \n\t" - -- "dcbt %2, %10 \n\t" -+ ".p2align 5 \n" -+ "1: \n\t" - -- "lxvd2x 40, 0, %2 \n\t" // y0, y1 -- "lxvd2x 41,%4, %2 \n\t" // y2, y3 -- -- "dcbt %6, %10 \n\t" -- "dcbt %7, %10 \n\t" -- "dcbt %8, %10 \n\t" -- "dcbt %9, %10 \n\t" -+ "lxvd2x 36, 0, %2 \n\t" // y0, y1 -+ "lxvd2x 37, %10, %2 \n\t" // y2, y3 - -- "xvmaddadp 40, 48, 32 \n\t" -- "xvmaddadp 41, 49, 32 \n\t" -+ "xvmaddadp 36, 40, 32 \n\t" -+ "xvmaddadp 37, 41, 32 \n\t" - -- "lxvd2x 48, 0, %6 \n\t" // a0[0], a0[1] -- "lxvd2x 49,%4, %6 \n\t" // a0[2], a0[3] -+ "lxvd2x 40, 0, %3 \n\t" // a0[0], a0[1] -+ "lxvd2x 41, %10, %3 \n\t" // a0[2], a0[3] - -- "xvmaddadp 40, 50, 33 \n\t" -- "addi %6, %6, 32 \n\t" -- "xvmaddadp 41, 51, 33 \n\t" -+ "xvmaddadp 36, 42, 33 \n\t" -+ "addi %3, %3, 32 \n\t" -+ "xvmaddadp 37, 43, 33 \n\t" - -- "lxvd2x 50, 0, %7 \n\t" // a1[0], a1[1] -- "lxvd2x 51,%4, %7 \n\t" // a1[2], a1[3] -+ "lxvd2x 42, 0, %4 \n\t" // a1[0], a1[1] -+ "lxvd2x 43, %10, %4 \n\t" // a1[2], a1[3] - -- "xvmaddadp 40, 52, 34 \n\t" -- "addi %7, %7, 32 \n\t" -- "xvmaddadp 41, 53, 34 \n\t" -+ "xvmaddadp 36, 44, 34 \n\t" -+ "addi %4, %4, 32 \n\t" -+ "xvmaddadp 37, 45, 34 \n\t" - -- "lxvd2x 52, 0, %8 \n\t" // a2[0], a2[1] -- "lxvd2x 53,%4, %8 \n\t" // a2[2], a2[3] -+ "lxvd2x 44, 0, %5 \n\t" // a2[0], a2[1] -+ "lxvd2x 45, %10, %5 \n\t" // a2[2], a2[3] - -- "xvmaddadp 40, 54, 35 \n\t" -- "addi %8, %8, 32 \n\t" -- "xvmaddadp 41, 55, 35 \n\t" -+ "xvmaddadp 36, 46, 35 \n\t" -+ "addi %5, %5, 32 \n\t" -+ "xvmaddadp 37, 47, 35 \n\t" - -- "stxvd2x 40, 0, %2 \n\t" // y0, y1 -- "stxvd2x 41,%4, %2 \n\t" // y2, y3 -+ "stxvd2x 36, 0, %2 \n\t" // y0, y1 -+ "stxvd2x 37, %10, %2 \n\t" // y2, y3 - -- "lxvd2x 54, 0, %9 \n\t" // a3[0], a3[1] -- "lxvd2x 55,%4, %9 \n\t" // a3[2], a3[3] -+ "lxvd2x 46, 0, %6 \n\t" // a3[0], a3[1] -+ "lxvd2x 47, %10, %6 \n\t" // a3[2], a3[3] - -- "addi %9, %9, 32 \n\t" -- "addi %2, %2, 32 \n\t" -+ "addi %6, %6, 32 \n\t" -+ "addi %2, %2, 32 \n\t" - -- "addic. %0 , %0 , -4 \n\t" -- "ble 2f \n\t" -+ "addic. 
%1, %1, -4 \n\t" -+ "ble 2f \n\t" - - -- "lxvd2x 40, 0, %2 \n\t" // y0, y1 -- "lxvd2x 41,%4, %2 \n\t" // y2, y3 -- -- "xvmaddadp 40, 48, 32 \n\t" -- "xvmaddadp 41, 49, 32 \n\t" -+ "lxvd2x 36, 0, %2 \n\t" // y0, y1 -+ "lxvd2x 37, %10, %2 \n\t" // y2, y3 - -- "lxvd2x 48, 0, %6 \n\t" // a0[0], a0[1] -- "lxvd2x 49,%4, %6 \n\t" // a0[2], a0[3] -+ "xvmaddadp 36, 40, 32 \n\t" -+ "xvmaddadp 37, 41, 32 \n\t" - -- "xvmaddadp 40, 50, 33 \n\t" -- "addi %6, %6, 32 \n\t" -- "xvmaddadp 41, 51, 33 \n\t" -+ "lxvd2x 40, 0, %3 \n\t" // a0[0], a0[1] -+ "lxvd2x 41, %10, %3 \n\t" // a0[2], a0[3] - -- "lxvd2x 50, 0, %7 \n\t" // a1[0], a1[1] -- "lxvd2x 51,%4, %7 \n\t" // a1[2], a1[3] -+ "xvmaddadp 36, 42, 33 \n\t" -+ "addi %3, %3, 32 \n\t" -+ "xvmaddadp 37, 43, 33 \n\t" - -- "xvmaddadp 40, 52, 34 \n\t" -- "addi %7, %7, 32 \n\t" -- "xvmaddadp 41, 53, 34 \n\t" -+ "lxvd2x 42, 0, %4 \n\t" // a1[0], a1[1] -+ "lxvd2x 43, %10, %4 \n\t" // a1[2], a1[3] - -- "lxvd2x 52, 0, %8 \n\t" // a2[0], a2[1] -- "lxvd2x 53,%4, %8 \n\t" // a2[2], a2[3] -+ "xvmaddadp 36, 44, 34 \n\t" -+ "addi %4, %4, 32 \n\t" -+ "xvmaddadp 37, 45, 34 \n\t" - -- "xvmaddadp 40, 54, 35 \n\t" -- "addi %8, %8, 32 \n\t" -- "xvmaddadp 41, 55, 35 \n\t" -+ "lxvd2x 44, 0, %5 \n\t" // a2[0], a2[1] -+ "lxvd2x 45, %10, %5 \n\t" // a2[2], a2[3] - -- "stxvd2x 40, 0, %2 \n\t" // y0, y1 -- "stxvd2x 41,%4, %2 \n\t" // y2, y3 -+ "xvmaddadp 36, 46, 35 \n\t" -+ "addi %5, %5, 32 \n\t" -+ "xvmaddadp 37, 47, 35 \n\t" - -- "lxvd2x 54, 0, %9 \n\t" // a3[0], a3[1] -- "lxvd2x 55,%4, %9 \n\t" // a3[2], a3[3] -+ "stxvd2x 36, 0, %2 \n\t" // y0, y1 -+ "stxvd2x 37, %10, %2 \n\t" // y2, y3 - -- "addi %9, %9, 32 \n\t" -- "addi %2, %2, 32 \n\t" -+ "lxvd2x 46, 0, %6 \n\t" // a3[0], a3[1] -+ "lxvd2x 47, %10, %6 \n\t" // a3[2], a3[3] - -- "addic. %0 , %0 , -4 \n\t" -- "ble 2f \n\t" -+ "addi %6, %6, 32 \n\t" -+ "addi %2, %2, 32 \n\t" - -+ "addic. 
%1, %1, -4 \n\t" -+ "ble 2f \n\t" - -- "lxvd2x 40, 0, %2 \n\t" // y0, y1 -- "lxvd2x 41,%4, %2 \n\t" // y2, y3 -- -- "xvmaddadp 40, 48, 32 \n\t" -- "xvmaddadp 41, 49, 32 \n\t" - -- "lxvd2x 48, 0, %6 \n\t" // a0[0], a0[1] -- "lxvd2x 49,%4, %6 \n\t" // a0[2], a0[3] -+ "lxvd2x 36, 0, %2 \n\t" // y0, y1 -+ "lxvd2x 37, %10, %2 \n\t" // y2, y3 - -- "xvmaddadp 40, 50, 33 \n\t" -- "addi %6, %6, 32 \n\t" -- "xvmaddadp 41, 51, 33 \n\t" -+ "xvmaddadp 36, 40, 32 \n\t" -+ "xvmaddadp 37, 41, 32 \n\t" - -- "lxvd2x 50, 0, %7 \n\t" // a1[0], a1[1] -- "lxvd2x 51,%4, %7 \n\t" // a1[2], a1[3] -+ "lxvd2x 40, 0, %3 \n\t" // a0[0], a0[1] -+ "lxvd2x 41, %10, %3 \n\t" // a0[2], a0[3] - -- "xvmaddadp 40, 52, 34 \n\t" -- "addi %7, %7, 32 \n\t" -- "xvmaddadp 41, 53, 34 \n\t" -+ "xvmaddadp 36, 42, 33 \n\t" -+ "addi %3, %3, 32 \n\t" -+ "xvmaddadp 37, 43, 33 \n\t" - -- "lxvd2x 52, 0, %8 \n\t" // a2[0], a2[1] -- "lxvd2x 53,%4, %8 \n\t" // a2[2], a2[3] -+ "lxvd2x 42, 0, %4 \n\t" // a1[0], a1[1] -+ "lxvd2x 43, %10, %4 \n\t" // a1[2], a1[3] - -- "xvmaddadp 40, 54, 35 \n\t" -- "addi %8, %8, 32 \n\t" -- "xvmaddadp 41, 55, 35 \n\t" -+ "xvmaddadp 36, 44, 34 \n\t" -+ "addi %4, %4, 32 \n\t" -+ "xvmaddadp 37, 45, 34 \n\t" - -- "stxvd2x 40, 0, %2 \n\t" // y0, y1 -- "stxvd2x 41,%4, %2 \n\t" // y2, y3 -+ "lxvd2x 44, 0, %5 \n\t" // a2[0], a2[1] -+ "lxvd2x 45, %10, %5 \n\t" // a2[2], a2[3] - -- "lxvd2x 54, 0, %9 \n\t" // a3[0], a3[1] -- "lxvd2x 55,%4, %9 \n\t" // a3[2], a3[3] -+ "xvmaddadp 36, 46, 35 \n\t" -+ "addi %5, %5, 32 \n\t" -+ "xvmaddadp 37, 47, 35 \n\t" - -- "addi %9, %9, 32 \n\t" -- "addi %2, %2, 32 \n\t" -+ "stxvd2x 36, 0, %2 \n\t" // y0, y1 -+ "stxvd2x 37, %10, %2 \n\t" // y2, y3 - -- "addic. %0 , %0 , -4 \n\t" -- "ble 2f \n\t" -+ "lxvd2x 46, 0, %6 \n\t" // a3[0], a3[1] -+ "lxvd2x 47, %10, %6 \n\t" // a3[2], a3[3] - -+ "addi %6, %6, 32 \n\t" -+ "addi %2, %2, 32 \n\t" - -- "lxvd2x 40, 0, %2 \n\t" // y0, y1 -- "lxvd2x 41,%4, %2 \n\t" // y2, y3 -- -- "xvmaddadp 40, 48, 32 \n\t" -- "xvmaddadp 41, 49, 32 \n\t" -+ "addic. %1, %1, -4 \n\t" -+ "ble 2f \n\t" - -- "lxvd2x 48, 0, %6 \n\t" // a0[0], a0[1] -- "lxvd2x 49,%4, %6 \n\t" // a0[2], a0[3] - -- "xvmaddadp 40, 50, 33 \n\t" -- "addi %6, %6, 32 \n\t" -- "xvmaddadp 41, 51, 33 \n\t" -+ "lxvd2x 36, 0, %2 \n\t" // y0, y1 -+ "lxvd2x 37, %10, %2 \n\t" // y2, y3 - -- "lxvd2x 50, 0, %7 \n\t" // a1[0], a1[1] -- "lxvd2x 51,%4, %7 \n\t" // a1[2], a1[3] -+ "xvmaddadp 36, 40, 32 \n\t" -+ "xvmaddadp 37, 41, 32 \n\t" - -- "xvmaddadp 40, 52, 34 \n\t" -- "addi %7, %7, 32 \n\t" -- "xvmaddadp 41, 53, 34 \n\t" -+ "lxvd2x 40, 0, %3 \n\t" // a0[0], a0[1] -+ "lxvd2x 41, %10, %3 \n\t" // a0[2], a0[3] - -- "lxvd2x 52, 0, %8 \n\t" // a2[0], a2[1] -- "lxvd2x 53,%4, %8 \n\t" // a2[2], a2[3] -+ "xvmaddadp 36, 42, 33 \n\t" -+ "addi %3, %3, 32 \n\t" -+ "xvmaddadp 37, 43, 33 \n\t" - -- "xvmaddadp 40, 54, 35 \n\t" -- "addi %8, %8, 32 \n\t" -- "xvmaddadp 41, 55, 35 \n\t" -+ "lxvd2x 42, 0, %4 \n\t" // a1[0], a1[1] -+ "lxvd2x 43, %10, %4 \n\t" // a1[2], a1[3] - -- "stxvd2x 40, 0, %2 \n\t" // y0, y1 -- "stxvd2x 41,%4, %2 \n\t" // y2, y3 -+ "xvmaddadp 36, 44, 34 \n\t" -+ "addi %4, %4, 32 \n\t" -+ "xvmaddadp 37, 45, 34 \n\t" - -- "lxvd2x 54, 0, %9 \n\t" // a3[0], a3[1] -- "lxvd2x 55,%4, %9 \n\t" // a3[2], a3[3] -+ "lxvd2x 44, 0, %5 \n\t" // a2[0], a2[1] -+ "lxvd2x 45, %10, %5 \n\t" // a2[2], a2[3] - -- "addi %9, %9, 32 \n\t" -- "addi %2, %2, 32 \n\t" -+ "xvmaddadp 36, 46, 35 \n\t" -+ "addi %5, %5, 32 \n\t" -+ "xvmaddadp 37, 47, 35 \n\t" - -- "addic. 
%0 , %0 , -4 \n\t" -- "bgt 1b \n\t" -+ "stxvd2x 36, 0, %2 \n\t" // y0, y1 -+ "stxvd2x 37, %10, %2 \n\t" // y2, y3 - -- "2: \n\t" -+ "lxvd2x 46, 0, %6 \n\t" // a3[0], a3[1] -+ "lxvd2x 47, %10, %6 \n\t" // a3[2], a3[3] - -- "lxvd2x 40, 0, %2 \n\t" // y0, y1 -- "lxvd2x 41,%4, %2 \n\t" // y2, y3 -+ "addi %6, %6, 32 \n\t" -+ "addi %2, %2, 32 \n\t" - -- "xvmaddadp 40, 48, 32 \n\t" -- "xvmaddadp 41, 49, 32 \n\t" -+ "addic. %1, %1, -4 \n\t" -+ "bgt 1b \n" - -- "xvmaddadp 40, 50, 33 \n\t" -- "xvmaddadp 41, 51, 33 \n\t" -+ "2: \n\t" - -- "xvmaddadp 40, 52, 34 \n\t" -- "xvmaddadp 41, 53, 34 \n\t" -+ "lxvd2x 36, 0, %2 \n\t" // y0, y1 -+ "lxvd2x 37, %10, %2 \n\t" // y2, y3 - -- "xvmaddadp 40, 54, 35 \n\t" -- "xvmaddadp 41, 55, 35 \n\t" -+ "xvmaddadp 36, 40, 32 \n\t" -+ "xvmaddadp 37, 41, 32 \n\t" - -- "stxvd2x 40, 0, %2 \n\t" // y0, y1 -- "stxvd2x 41,%4, %2 \n\t" // y2, y3 -+ "xvmaddadp 36, 42, 33 \n\t" -+ "xvmaddadp 37, 43, 33 \n\t" - -- : -- : -- "r" (i), // 0 -- "r" (x), // 1 -- "r" (y1), // 2 -- "r" (o8), // 3 -- "r" (o16), // 4 -- "r" (o24), // 5 -- "r" (a0), // 6 -- "r" (a1), // 7 -- "r" (a2), // 8 -- "r" (a3), // 9 -- "r" (pre) // 10 -- : "cr0", "%0", "%2" , "%6", "%7", "%8", "%9", "memory" -- ); -+ "xvmaddadp 36, 44, 34 \n\t" -+ "xvmaddadp 37, 45, 34 \n\t" - --} -+ "xvmaddadp 36, 46, 35 \n\t" -+ "xvmaddadp 37, 47, 35 \n\t" - -+ "stxvd2x 36, 0, %2 \n\t" // y0, y1 -+ "stxvd2x 37, %10, %2 \n" // y2, y3 - -+ "#n=%1 ap=%11 lda=%12 x=%7=%9 y=%0=%2 alpha=%8 o16=%10\n" -+ "#a0=%3 a1=%4 a2=%5 a3=%6" -+ : -+ "=m" (*y), -+ "+r" (n), // 1 -+ "+b" (y), // 2 -+ "=b" (a0), // 3 -+ "=b" (a1), // 4 -+ "=&b" (a2), // 5 -+ "=&b" (a3) // 6 -+ : -+ "m" (*x), -+ "d" (alpha), // 8 -+ "r" (x), // 9 -+ "b" (16), // 10 -+ "3" (ap), // 11 -+ "4" (lda) // 12 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47" -+ ); -+} -diff --git a/kernel/power/drot.c b/kernel/power/drot.c -index c93f69b..3e10748 100644 ---- a/kernel/power/drot.c -+++ b/kernel/power/drot.c -@@ -46,7 +46,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #ifndef HAVE_KERNEL_16 - --static void drot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) -+static void drot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s) - { - - BLASLONG i=0; -@@ -56,8 +56,6 @@ static void drot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) - FLOAT y00, y01, y02, y03; - FLOAT *x1=x; - FLOAT *y1=y; -- FLOAT c1=*c; -- FLOAT s1=*s; - - while ( i 0 ) - { -- c1[0]=c; -- c1[1]=c; -- c1[2]=c; -- c1[3]=c; -- s1[0]=s; -- s1[1]=s; -- s1[2]=s; -- s1[3]=s; -- drot_kernel_16(n1, x1, y1, c1, s1); -+ drot_kernel_16(n1, x1, y1, c, s); - i=n1; - } - -diff --git a/kernel/power/drot_microk_power8.c b/kernel/power/drot_microk_power8.c -index 4444ac7..016b776 100644 ---- a/kernel/power/drot_microk_power8.c -+++ b/kernel/power/drot_microk_power8.c -@@ -38,174 +38,176 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - #define HAVE_KERNEL_16 1 - --static void drot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) __attribute__ ((noinline)); -- --static void drot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) -+static void drot_kernel_16 (long n, double *x, double *y, double c, double s) - { -+ __vector double t0; -+ __vector double t1; -+ __vector double t2; -+ __vector double t3; -+ __vector double t4; -+ __vector double t5; -+ __vector double t6; -+ __vector double t7; - -+ __asm__ -+ ( -+ "xxspltd 36, %x13, 0 \n\t" // load c to both dwords -+ "xxspltd 37, %x14, 0 \n\t" // load s to both dwords - -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- FLOAT *x1=x; -- FLOAT *y1=y; -- FLOAT *x2=x+1; -- FLOAT *y2=y+1; -- -- __asm__ __volatile__ -- ( -- -- "lxsdx 36 , %5, %3 \n\t" // load c -- "lxsdx 37 , %5, %4 \n\t" // load s -- "addi %8 , %8, -8 \n\t" -- "addi %9 , %9, -8 \n\t" -- -- "xxspltd 36 , 36, 0 \n\t" -- "xxspltd 37 , 37, 0 \n\t" -- -- "lxvd2x 32, 0, %1 \n\t" // load x -- "lxvd2x 33, %5, %1 \n\t" -- "lxvd2x 34, %6, %1 \n\t" -- "lxvd2x 35, %7, %1 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" // load y -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- -- "addi %1, %1, 64 \n\t" -- "addi %2, %2, 64 \n\t" -- -- "addic. %0 , %0 , -8 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "xvmuldp 48, 32, 36 \n\t" // c * x -- "xvmuldp 49, 33, 36 \n\t" -- "xvmuldp 50, 34, 36 \n\t" -- "xvmuldp 51, 35, 36 \n\t" -- -- "xvmuldp 56, 40, 36 \n\t" // c * y -- "xvmuldp 57, 41, 36 \n\t" -- "xvmuldp 58, 42, 36 \n\t" -- "xvmuldp 59, 43, 36 \n\t" -- -- "xvmuldp 52, 32, 37 \n\t" // s * x -- "xvmuldp 53, 33, 37 \n\t" -- -- "lxvd2x 32, 0, %1 \n\t" // load x -- "lxvd2x 33, %5, %1 \n\t" -- -- "xvmuldp 54, 34, 37 \n\t" -- "xvmuldp 55, 35, 37 \n\t" -- -- "lxvd2x 34, %6, %1 \n\t" -- "lxvd2x 35, %7, %1 \n\t" -- -- "xvmuldp 60, 40, 37 \n\t" // s * y -- "xvmuldp 61, 41, 37 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" // load y -- "lxvd2x 41, %5, %2 \n\t" -- -- "xvmuldp 62, 42, 37 \n\t" -- "xvmuldp 63, 43, 37 \n\t" -- -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- -- "xvadddp 48, 48 , 60 \n\t" // c * x + s * y -- "xvadddp 49, 49 , 61 \n\t" // c * x + s * y -- -- "addi %1, %1, 64 \n\t" -- "addi %2, %2, 64 \n\t" -- -- "xvadddp 50, 50 , 62 \n\t" // c * x + s * y -- "xvadddp 51, 51 , 63 \n\t" // c * x + s * y -- -- "xvsubdp 56, 56 , 52 \n\t" // c * y - s * x -- "xvsubdp 57, 57 , 53 \n\t" // c * y - s * x -- "xvsubdp 58, 58 , 54 \n\t" // c * y - s * x -- "xvsubdp 59, 59 , 55 \n\t" // c * y - s * x -- -- "stxvd2x 48, 0, %8 \n\t" // store x -- "stxvd2x 49, %5, %8 \n\t" -- "stxvd2x 50, %6, %8 \n\t" -- "stxvd2x 51, %7, %8 \n\t" -- -- "stxvd2x 56, 0, %9 \n\t" // store y -- "stxvd2x 57, %5, %9 \n\t" -- "stxvd2x 58, %6, %9 \n\t" -- "stxvd2x 59, %7, %9 \n\t" -- -- "addi %8, %8, 64 \n\t" -- "addi %9, %9, 64 \n\t" -- -- "addic. 
%0 , %0 , -8 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "xvmuldp 48, 32, 36 \n\t" // c * x -- "xvmuldp 49, 33, 36 \n\t" -- "xvmuldp 50, 34, 36 \n\t" -- "xvmuldp 51, 35, 36 \n\t" -- -- "xvmuldp 56, 40, 36 \n\t" // c * y -- "xvmuldp 57, 41, 36 \n\t" -- "xvmuldp 58, 42, 36 \n\t" -- "xvmuldp 59, 43, 36 \n\t" -- -- "xvmuldp 52, 32, 37 \n\t" // s * x -- "xvmuldp 53, 33, 37 \n\t" -- "xvmuldp 54, 34, 37 \n\t" -- "xvmuldp 55, 35, 37 \n\t" -- -- "xvmuldp 60, 40, 37 \n\t" // s * y -- "xvmuldp 61, 41, 37 \n\t" -- "xvmuldp 62, 42, 37 \n\t" -- "xvmuldp 63, 43, 37 \n\t" -- -- "xvadddp 48, 48 , 60 \n\t" // c * x + s * y -- "xvadddp 49, 49 , 61 \n\t" // c * x + s * y -- "xvadddp 50, 50 , 62 \n\t" // c * x + s * y -- "xvadddp 51, 51 , 63 \n\t" // c * x + s * y -- -- "xvsubdp 56, 56 , 52 \n\t" // c * y - s * x -- "xvsubdp 57, 57 , 53 \n\t" // c * y - s * x -- "xvsubdp 58, 58 , 54 \n\t" // c * y - s * x -- "xvsubdp 59, 59 , 55 \n\t" // c * y - s * x -- -- "stxvd2x 48, 0, %8 \n\t" // store x -- "stxvd2x 49, %5, %8 \n\t" -- "stxvd2x 50, %6, %8 \n\t" -- "stxvd2x 51, %7, %8 \n\t" -+ "lxvd2x 32, 0, %3 \n\t" // load x -+ "lxvd2x 33, %15, %3 \n\t" -+ "lxvd2x 34, %16, %3 \n\t" -+ "lxvd2x 35, %17, %3 \n\t" - -- "stxvd2x 56, 0, %9 \n\t" // store y -- "stxvd2x 57, %5, %9 \n\t" -- "stxvd2x 58, %6, %9 \n\t" -- "stxvd2x 59, %7, %9 \n\t" -+ "lxvd2x 48, 0, %4 \n\t" // load y -+ "lxvd2x 49, %15, %4 \n\t" -+ "lxvd2x 50, %16, %4 \n\t" -+ "lxvd2x 51, %17, %4 \n\t" - -+ "addi %3, %3, 64 \n\t" -+ "addi %4, %4, 64 \n\t" - -+ "addic. %2, %2, -8 \n\t" -+ "ble 2f \n\t" - -- : -- : -- "r" (i), // 0 -- "r" (x1), // 1 -- "r" (y1), // 2 -- "r" (c), // 3 -- "r" (s), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (x2), // 8 -- "r" (y2) // 9 -- : "cr0", "%0", "%1" , "%2", "%8", "%9", "memory" -- ); -+ ".p2align 5 \n" -+ "1: \n\t" - --} -+ "xvmuldp 40, 32, 36 \n\t" // c * x -+ "xvmuldp 41, 33, 36 \n\t" -+ "xvmuldp 42, 34, 36 \n\t" -+ "xvmuldp 43, 35, 36 \n\t" - -+ "xvmuldp %x5, 48, 36 \n\t" // c * y -+ "xvmuldp %x6, 49, 36 \n\t" -+ "xvmuldp %x7, 50, 36 \n\t" -+ "xvmuldp %x8, 51, 36 \n\t" - -+ "xvmuldp 44, 32, 37 \n\t" // s * x -+ "xvmuldp 45, 33, 37 \n\t" -+ -+ "lxvd2x 32, 0, %3 \n\t" // load x -+ "lxvd2x 33, %15, %3 \n\t" -+ -+ "xvmuldp 46, 34, 37 \n\t" -+ "xvmuldp 47, 35, 37 \n\t" -+ -+ "lxvd2x 34, %16, %3 \n\t" -+ "lxvd2x 35, %17, %3 \n\t" -+ -+ "xvmuldp %x9, 48, 37 \n\t" // s * y -+ "xvmuldp %x10, 49, 37 \n\t" -+ -+ "lxvd2x 48, 0, %4 \n\t" // load y -+ "lxvd2x 49, %15, %4 \n\t" -+ -+ "xvmuldp %x11, 50, 37 \n\t" -+ "xvmuldp %x12, 51, 37 \n\t" -+ -+ "lxvd2x 50, %16, %4 \n\t" -+ "lxvd2x 51, %17, %4 \n\t" -+ -+ "xvadddp 40, 40, %x9 \n\t" // c * x + s * y -+ "xvadddp 41, 41, %x10 \n\t" // c * x + s * y -+ -+ "addi %3, %3, -64 \n\t" -+ "addi %4, %4, -64 \n\t" -+ -+ "xvadddp 42, 42, %x11 \n\t" // c * x + s * y -+ "xvadddp 43, 43, %x12 \n\t" // c * x + s * y -+ -+ "xvsubdp %x5, %x5, 44 \n\t" // c * y - s * x -+ "xvsubdp %x6, %x6, 45 \n\t" // c * y - s * x -+ "xvsubdp %x7, %x7, 46 \n\t" // c * y - s * x -+ "xvsubdp %x8, %x8, 47 \n\t" // c * y - s * x -+ -+ "stxvd2x 40, 0, %3 \n\t" // store x -+ "stxvd2x 41, %15, %3 \n\t" -+ "stxvd2x 42, %16, %3 \n\t" -+ "stxvd2x 43, %17, %3 \n\t" -+ -+ "stxvd2x %x5, 0, %4 \n\t" // store y -+ "stxvd2x %x6, %15, %4 \n\t" -+ "stxvd2x %x7, %16, %4 \n\t" -+ "stxvd2x %x8, %17, %4 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ "addi %4, %4, 128 \n\t" -+ -+ "addic. 
%2, %2, -8 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvmuldp 40, 32, 36 \n\t" // c * x -+ "xvmuldp 41, 33, 36 \n\t" -+ "xvmuldp 42, 34, 36 \n\t" -+ "xvmuldp 43, 35, 36 \n\t" -+ -+ "xvmuldp %x5, 48, 36 \n\t" // c * y -+ "xvmuldp %x6, 49, 36 \n\t" -+ "xvmuldp %x7, 50, 36 \n\t" -+ "xvmuldp %x8, 51, 36 \n\t" -+ -+ "xvmuldp 44, 32, 37 \n\t" // s * x -+ "xvmuldp 45, 33, 37 \n\t" -+ "xvmuldp 46, 34, 37 \n\t" -+ "xvmuldp 47, 35, 37 \n\t" -+ -+ "xvmuldp %x9, 48, 37 \n\t" // s * y -+ "xvmuldp %x10, 49, 37 \n\t" -+ "xvmuldp %x11, 50, 37 \n\t" -+ "xvmuldp %x12, 51, 37 \n\t" -+ -+ "addi %3, %3, -64 \n\t" -+ "addi %4, %4, -64 \n\t" -+ -+ "xvadddp 40, 40, %x9 \n\t" // c * x + s * y -+ "xvadddp 41, 41, %x10 \n\t" // c * x + s * y -+ "xvadddp 42, 42, %x11 \n\t" // c * x + s * y -+ "xvadddp 43, 43, %x12 \n\t" // c * x + s * y -+ -+ "xvsubdp %x5, %x5, 44 \n\t" // c * y - s * x -+ "xvsubdp %x6, %x6, 45 \n\t" // c * y - s * x -+ "xvsubdp %x7, %x7, 46 \n\t" // c * y - s * x -+ "xvsubdp %x8, %x8, 47 \n\t" // c * y - s * x -+ -+ "stxvd2x 40, 0, %3 \n\t" // store x -+ "stxvd2x 41, %15, %3 \n\t" -+ "stxvd2x 42, %16, %3 \n\t" -+ "stxvd2x 43, %17, %3 \n\t" -+ -+ "stxvd2x %x5, 0, %4 \n\t" // store y -+ "stxvd2x %x6, %15, %4 \n\t" -+ "stxvd2x %x7, %16, %4 \n\t" -+ "stxvd2x %x8, %17, %4 \n" -+ -+ "#n=%2 x=%0=%3 y=%1=%4 c=%13 s=%14 o16=%15 o32=%16 o48=%17\n" -+ "#t0=%x5 t1=%x6 t2=%x7 t3=%x8 t4=%x9 t5=%x10 t6=%x11 t7=%x12" -+ : -+ "+m" (*x), -+ "+m" (*y), -+ "+r" (n), // 2 -+ "+b" (x), // 3 -+ "+b" (y), // 4 -+ "=wa" (t0), // 5 -+ "=wa" (t1), // 6 -+ "=wa" (t2), // 7 -+ "=wa" (t3), // 8 -+ "=wa" (t4), // 9 -+ "=wa" (t5), // 10 -+ "=wa" (t6), // 11 -+ "=wa" (t7) // 12 -+ : -+ "d" (c), // 13 -+ "d" (s), // 14 -+ "b" (16), // 15 -+ "b" (32), // 16 -+ "b" (48) // 17 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51" -+ ); -+} -diff --git a/kernel/power/dscal.c b/kernel/power/dscal.c -index c62a563..f32dc4b 100644 ---- a/kernel/power/dscal.c -+++ b/kernel/power/dscal.c -@@ -41,11 +41,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #if !defined(HAVE_KERNEL_8) - --static void dscal_kernel_8( BLASLONG n, FLOAT *da , FLOAT *x ) -+static void dscal_kernel_8 (BLASLONG n, FLOAT *x, FLOAT alpha) - { - - BLASLONG i; -- FLOAT alpha = *da; - - for( i=0; i 0 ) - { -- FLOAT alpha[2]; -- alpha[0]=da; -- alpha[1]=da; -- dscal_kernel_8_zero(n1 , alpha , x); -+ dscal_kernel_8_zero(n1, x); - j=n1; - } - -@@ -123,10 +119,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS - BLASLONG n1 = n & -16; - if ( n1 > 0 ) - { -- FLOAT alpha[2]; -- alpha[0]=da; -- alpha[1]=da; -- dscal_kernel_8(n1 , alpha , x); -+ dscal_kernel_8(n1, x, da); - j=n1; - } - while(j < n) -diff --git a/kernel/power/dscal_microk_power8.c b/kernel/power/dscal_microk_power8.c -index d90c3d8..04898eb 100644 ---- a/kernel/power/dscal_microk_power8.c -+++ b/kernel/power/dscal_microk_power8.c -@@ -35,185 +35,149 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - #define HAVE_KERNEL_8 1 - --static void dscal_kernel_8( BLASLONG n, FLOAT *alpha, FLOAT *x) __attribute__ ((noinline)); -- --static void dscal_kernel_8( BLASLONG n, FLOAT *alpha, FLOAT *x) -+static void dscal_kernel_8 (long n, double *x, double alpha) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *x2=x+1; -- BLASLONG pre = 384; -- -- __asm__ __volatile__ -- ( -- -- "lxsdx 33, 0, %3 \n\t" -- "xxspltd 32, 33, 0 \n\t" -- "addi %1, %1, -8 \n\t" -- -- "dcbt %2, %4 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -16 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2, %4 \n\t" -- -- "xvmuldp 48, 40, 32 \n\t" -- "xvmuldp 49, 41, 32 \n\t" -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "xvmuldp 50, 42, 32 \n\t" -- "xvmuldp 51, 43, 32 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "xvmuldp 52, 44, 32 \n\t" -- "xvmuldp 53, 45, 32 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "xvmuldp 54, 46, 32 \n\t" -- "xvmuldp 55, 47, 32 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "stxvd2x 48, 0, %1 \n\t" -- "stxvd2x 49, %5, %1 \n\t" -- "stxvd2x 50, %6, %1 \n\t" -- "stxvd2x 51, %7, %1 \n\t" -- "stxvd2x 52, %8, %1 \n\t" -- "stxvd2x 53, %9, %1 \n\t" -- "stxvd2x 54, %10, %1 \n\t" -- "stxvd2x 55, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -16 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "xvmuldp 48, 40, 32 \n\t" -- "xvmuldp 49, 41, 32 \n\t" -- "xvmuldp 50, 42, 32 \n\t" -- "xvmuldp 51, 43, 32 \n\t" -- "xvmuldp 52, 44, 32 \n\t" -- "xvmuldp 53, 45, 32 \n\t" -- "xvmuldp 54, 46, 32 \n\t" -- "xvmuldp 55, 47, 32 \n\t" -- -- "stxvd2x 48, 0, %1 \n\t" -- "stxvd2x 49, %5, %1 \n\t" -- "stxvd2x 50, %6, %1 \n\t" -- "stxvd2x 51, %7, %1 \n\t" -- "stxvd2x 52, %8, %1 \n\t" -- "stxvd2x 53, %9, %1 \n\t" -- "stxvd2x 54, %10, %1 \n\t" -- "stxvd2x 55, %11, %1 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (x2), // 1 -- "r" (x1), // 2 -- "r" (alpha), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "memory" -- ); -- --} -- -- --static void dscal_kernel_8_zero( BLASLONG n, FLOAT *alpha, FLOAT *x) __attribute__ ((noinline)); -- --static void dscal_kernel_8_zero( BLASLONG n, FLOAT *alpha, FLOAT *x) -+ __asm__ -+ ( -+ "dcbt 0, %2 \n\t" -+ -+ "xxspltd %x3, %x3, 0 \n\t" -+ -+ "lxvd2x 32, 0, %2 \n\t" -+ "lxvd2x 33, %4, %2 \n\t" -+ "lxvd2x 34, %5, %2 \n\t" -+ "lxvd2x 35, %6, %2 \n\t" -+ "lxvd2x 36, %7, %2 \n\t" -+ "lxvd2x 37, %8, %2 \n\t" -+ "lxvd2x 38, %9, %2 \n\t" -+ "lxvd2x 39, %10, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. 
%1, %1, -16 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "xvmuldp 40, 32, %x3 \n\t" -+ "xvmuldp 41, 33, %x3 \n\t" -+ "lxvd2x 32, 0, %2 \n\t" -+ "lxvd2x 33, %4, %2 \n\t" -+ "xvmuldp 42, 34, %x3 \n\t" -+ "xvmuldp 43, 35, %x3 \n\t" -+ "lxvd2x 34, %5, %2 \n\t" -+ "lxvd2x 35, %6, %2 \n\t" -+ "xvmuldp 44, 36, %x3 \n\t" -+ "xvmuldp 45, 37, %x3 \n\t" -+ "lxvd2x 36, %7, %2 \n\t" -+ "lxvd2x 37, %8, %2 \n\t" -+ "xvmuldp 46, 38, %x3 \n\t" -+ "xvmuldp 47, 39, %x3 \n\t" -+ "lxvd2x 38, %9, %2 \n\t" -+ "lxvd2x 39, %10, %2 \n\t" -+ -+ "addi %2, %2, -128 \n\t" -+ -+ "stxvd2x 40, 0, %2 \n\t" -+ "stxvd2x 41, %4, %2 \n\t" -+ "stxvd2x 42, %5, %2 \n\t" -+ "stxvd2x 43, %6, %2 \n\t" -+ "stxvd2x 44, %7, %2 \n\t" -+ "stxvd2x 45, %8, %2 \n\t" -+ "stxvd2x 46, %9, %2 \n\t" -+ "stxvd2x 47, %10, %2 \n\t" -+ -+ "addi %2, %2, 256 \n\t" -+ -+ "addic. %1, %1, -16 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvmuldp 40, 32, %x3 \n\t" -+ "xvmuldp 41, 33, %x3 \n\t" -+ "xvmuldp 42, 34, %x3 \n\t" -+ "xvmuldp 43, 35, %x3 \n\t" -+ -+ "addi %2, %2, -128 \n\t" -+ -+ "xvmuldp 44, 36, %x3 \n\t" -+ "xvmuldp 45, 37, %x3 \n\t" -+ "xvmuldp 46, 38, %x3 \n\t" -+ "xvmuldp 47, 39, %x3 \n\t" -+ -+ "stxvd2x 40, 0, %2 \n\t" -+ "stxvd2x 41, %4, %2 \n\t" -+ "stxvd2x 42, %5, %2 \n\t" -+ "stxvd2x 43, %6, %2 \n\t" -+ "stxvd2x 44, %7, %2 \n\t" -+ "stxvd2x 45, %8, %2 \n\t" -+ "stxvd2x 46, %9, %2 \n\t" -+ "stxvd2x 47, %10, %2 \n" -+ -+ "#n=%1 alpha=%3 x=%0=%2 o16=%4 o32=%5 o48=%6 o64=%7 o80=%8 o96=%9 o112=%10" -+ : -+ "+m" (*x), -+ "+r" (n), // 1 -+ "+b" (x) // 2 -+ : -+ "d" (alpha), // 3 -+ "b" (16), // 4 -+ "b" (32), // 5 -+ "b" (48), // 6 -+ "b" (64), // 7 -+ "b" (80), // 8 -+ "b" (96), // 9 -+ "b" (112) // 10 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47" -+ ); -+} -+ -+ -+static void dscal_kernel_8_zero (long n, double *x) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *x2=x+1; -- BLASLONG pre = 384; -- -- __asm__ __volatile__ -- ( -- -- "xxlxor 32 , 32 , 32 \n\t" -- "addi %1, %1, -8 \n\t" -- -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "stxvd2x 32, 0, %1 \n\t" -- "stxvd2x 32, %5, %1 \n\t" -- "stxvd2x 32, %6, %1 \n\t" -- "stxvd2x 32, %7, %1 \n\t" -- "stxvd2x 32, %8, %1 \n\t" -- "stxvd2x 32, %9, %1 \n\t" -- "stxvd2x 32, %10, %1 \n\t" -- "stxvd2x 32, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "addic. %0 , %0 , -16 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (x2), // 1 -- "r" (x1), // 2 -- "r" (alpha), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "memory" -- ); -- --} -- -- -+ __vector double t0; -+ -+ __asm__ -+ ( -+ "xxlxor %x3, %x3, %x3 \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "stxvd2x %x3, 0, %2 \n\t" -+ "stxvd2x %x3, %4, %2 \n\t" -+ "stxvd2x %x3, %5, %2 \n\t" -+ "stxvd2x %x3, %6, %2 \n\t" -+ "stxvd2x %x3, %7, %2 \n\t" -+ "stxvd2x %x3, %8, %2 \n\t" -+ "stxvd2x %x3, %9, %2 \n\t" -+ "stxvd2x %x3, %10, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. 
%1, %1, -16 \n\t" -+ "bgt 1b \n" -+ -+ "#n=%1 x=%0=%2 t0=%x3 o16=%4 o32=%5 o48=%6 o64=%7 o80=%8 o96=%9 o112=%10" -+ : -+ "=m" (*x), -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "=wa" (t0) // 3 -+ : -+ "b" (16), // 4 -+ "b" (32), // 5 -+ "b" (48), // 6 -+ "b" (64), // 7 -+ "b" (80), // 8 -+ "b" (96), // 9 -+ "b" (112) // 10 -+ : -+ "cr0" -+ ); -+} -diff --git a/kernel/power/dswap_microk_power8.c b/kernel/power/dswap_microk_power8.c -index 77747c3..31eff34 100644 ---- a/kernel/power/dswap_microk_power8.c -+++ b/kernel/power/dswap_microk_power8.c -@@ -35,146 +35,124 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #define HAVE_KERNEL_32 1 - --static void dswap_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) __attribute__ ((noinline)); -- --static void dswap_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) -+static void dswap_kernel_32 (long n, double *x, double *y) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *y1=y; -- FLOAT *x2=x+1; -- FLOAT *y2=y+1; -- BLASLONG pre = 384; -- BLASLONG alpha=0; -- -- __asm__ __volatile__ -- ( -- -- "addi %3, %3, -8 \n\t" -- "addi %4, %4, -8 \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "lxvd2x 32, 0, %2 \n\t" -- "lxvd2x 33, %5, %2 \n\t" -- "lxvd2x 34, %6, %2 \n\t" -- "lxvd2x 35, %7, %2 \n\t" -- "lxvd2x 36, %8, %2 \n\t" -- "lxvd2x 37, %9, %2 \n\t" -- "lxvd2x 38, %10, %2 \n\t" -- "lxvd2x 39, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "lxvd2x 48, 0, %1 \n\t" -- "lxvd2x 49, %5, %1 \n\t" -- "lxvd2x 50, %6, %1 \n\t" -- "lxvd2x 51, %7, %1 \n\t" -- "lxvd2x 52, %8, %1 \n\t" -- "lxvd2x 53, %9, %1 \n\t" -- "lxvd2x 54, %10, %1 \n\t" -- "lxvd2x 55, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "lxvd2x 56, 0, %1 \n\t" -- "lxvd2x 57, %5, %1 \n\t" -- "lxvd2x 58, %6, %1 \n\t" -- "lxvd2x 59, %7, %1 \n\t" -- "lxvd2x 60, %8, %1 \n\t" -- "lxvd2x 61, %9, %1 \n\t" -- "lxvd2x 62, %10, %1 \n\t" -- "lxvd2x 63, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "stxvd2x 32, 0, %3 \n\t" -- "stxvd2x 33, %5, %3 \n\t" -- "stxvd2x 34, %6, %3 \n\t" -- "stxvd2x 35, %7, %3 \n\t" -- "stxvd2x 36, %8, %3 \n\t" -- "stxvd2x 37, %9, %3 \n\t" -- "stxvd2x 38, %10, %3 \n\t" -- "stxvd2x 39, %11, %3 \n\t" -- -- "addi %3, %3, 128 \n\t" -- -- "stxvd2x 40, 0, %3 \n\t" -- "stxvd2x 41, %5, %3 \n\t" -- "stxvd2x 42, %6, %3 \n\t" -- "stxvd2x 43, %7, %3 \n\t" -- "stxvd2x 44, %8, %3 \n\t" -- "stxvd2x 45, %9, %3 \n\t" -- "stxvd2x 46, %10, %3 \n\t" -- "stxvd2x 47, %11, %3 \n\t" -- -- "addi %3, %3, 128 \n\t" -- -- "stxvd2x 48, 0, %4 \n\t" -- "stxvd2x 49, %5, %4 \n\t" -- "stxvd2x 50, %6, %4 \n\t" -- "stxvd2x 51, %7, %4 \n\t" -- "stxvd2x 52, %8, %4 \n\t" -- "stxvd2x 53, %9, %4 \n\t" -- "stxvd2x 54, %10, %4 \n\t" -- "stxvd2x 55, %11, %4 \n\t" -- -- "addi %4, %4, 128 \n\t" -- -- "stxvd2x 56, 0, %4 \n\t" -- "stxvd2x 57, %5, %4 \n\t" -- "stxvd2x 58, %6, %4 \n\t" -- "stxvd2x 59, %7, %4 \n\t" -- "stxvd2x 60, %8, %4 \n\t" -- "stxvd2x 61, %9, %4 \n\t" -- "stxvd2x 62, %10, %4 \n\t" -- "stxvd2x 63, %11, %4 \n\t" -- -- "addi %4, %4, 128 \n\t" -- -- "addic. 
%0 , %0 , -32 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (y1), // 1 -- "r" (x1), // 2 -- "r" (y2), // 3 -- "r" (x2), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "%3", "%4", "memory" -- ); -- --} -- -- -+ __asm__ -+ ( -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "lxvd2x 32, 0, %4 \n\t" -+ "lxvd2x 33, %5, %4 \n\t" -+ "lxvd2x 34, %6, %4 \n\t" -+ "lxvd2x 35, %7, %4 \n\t" -+ "lxvd2x 36, %8, %4 \n\t" -+ "lxvd2x 37, %9, %4 \n\t" -+ "lxvd2x 38, %10, %4 \n\t" -+ "lxvd2x 39, %11, %4 \n\t" -+ -+ "addi %4, %4, 128 \n\t" -+ -+ "lxvd2x 40, 0, %4 \n\t" -+ "lxvd2x 41, %5, %4 \n\t" -+ "lxvd2x 42, %6, %4 \n\t" -+ "lxvd2x 43, %7, %4 \n\t" -+ "lxvd2x 44, %8, %4 \n\t" -+ "lxvd2x 45, %9, %4 \n\t" -+ "lxvd2x 46, %10, %4 \n\t" -+ "lxvd2x 47, %11, %4 \n\t" -+ -+ "addi %4, %4, -128 \n\t" -+ -+ "lxvd2x 48, 0, %3 \n\t" -+ "lxvd2x 49, %5, %3 \n\t" -+ "lxvd2x 50, %6, %3 \n\t" -+ "lxvd2x 51, %7, %3 \n\t" -+ "lxvd2x 0, %8, %3 \n\t" -+ "lxvd2x 1, %9, %3 \n\t" -+ "lxvd2x 2, %10, %3 \n\t" -+ "lxvd2x 3, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "lxvd2x 4, 0, %3 \n\t" -+ "lxvd2x 5, %5, %3 \n\t" -+ "lxvd2x 6, %6, %3 \n\t" -+ "lxvd2x 7, %7, %3 \n\t" -+ "lxvd2x 8, %8, %3 \n\t" -+ "lxvd2x 9, %9, %3 \n\t" -+ "lxvd2x 10, %10, %3 \n\t" -+ "lxvd2x 11, %11, %3 \n\t" -+ -+ "addi %3, %3, -128 \n\t" -+ -+ "stxvd2x 32, 0, %3 \n\t" -+ "stxvd2x 33, %5, %3 \n\t" -+ "stxvd2x 34, %6, %3 \n\t" -+ "stxvd2x 35, %7, %3 \n\t" -+ "stxvd2x 36, %8, %3 \n\t" -+ "stxvd2x 37, %9, %3 \n\t" -+ "stxvd2x 38, %10, %3 \n\t" -+ "stxvd2x 39, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "stxvd2x 40, 0, %3 \n\t" -+ "stxvd2x 41, %5, %3 \n\t" -+ "stxvd2x 42, %6, %3 \n\t" -+ "stxvd2x 43, %7, %3 \n\t" -+ "stxvd2x 44, %8, %3 \n\t" -+ "stxvd2x 45, %9, %3 \n\t" -+ "stxvd2x 46, %10, %3 \n\t" -+ "stxvd2x 47, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "stxvd2x 48, 0, %4 \n\t" -+ "stxvd2x 49, %5, %4 \n\t" -+ "stxvd2x 50, %6, %4 \n\t" -+ "stxvd2x 51, %7, %4 \n\t" -+ "stxvd2x 0, %8, %4 \n\t" -+ "stxvd2x 1, %9, %4 \n\t" -+ "stxvd2x 2, %10, %4 \n\t" -+ "stxvd2x 3, %11, %4 \n\t" -+ -+ "addi %4, %4, 128 \n\t" -+ -+ "stxvd2x 4, 0, %4 \n\t" -+ "stxvd2x 5, %5, %4 \n\t" -+ "stxvd2x 6, %6, %4 \n\t" -+ "stxvd2x 7, %7, %4 \n\t" -+ "stxvd2x 8, %8, %4 \n\t" -+ "stxvd2x 9, %9, %4 \n\t" -+ "stxvd2x 10, %10, %4 \n\t" -+ "stxvd2x 11, %11, %4 \n\t" -+ -+ "addi %4, %4, 128 \n\t" -+ -+ "addic. %2, %2, -32 \n\t" -+ "bgt 1b \n" -+ -+ "#n=%2 x=%0=%3 y=%1=%4 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" -+ : -+ "+m" (*x), -+ "+m" (*y), -+ "+r" (n), // 2 -+ "+b" (x), // 3 -+ "+b" (y) // 4 -+ : -+ "b" (16), // 5 -+ "b" (32), // 6 -+ "b" (48), // 7 -+ "b" (64), // 8 -+ "b" (80), // 9 -+ "b" (96), // 10 -+ "b" (112) // 11 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51","vs0","vs1","vs2","vs3", -+ "vs4","vs5","vs6","vs7","vs8","vs9","vs10","vs11" -+ ); -+} -diff --git a/kernel/power/sasum.c b/kernel/power/sasum.c -index 43311f2..fb10b1d 100644 ---- a/kernel/power/sasum.c -+++ b/kernel/power/sasum.c -@@ -38,7 +38,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #if defined(DOUBLE) - --#define ABS fabs -+#error supports float only - - #else - -@@ -53,7 +53,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - #ifndef HAVE_KERNEL_32 - --static void sasum_kernel_32(BLASLONG n, FLOAT *x1, FLOAT *svec) -+static FLOAT sasum_kernel_32(BLASLONG n, FLOAT *x1) - { - - BLASLONG i=0; -@@ -92,11 +92,7 @@ static void sasum_kernel_32(BLASLONG n, FLOAT *x1, FLOAT *svec) - - } - -- svec[0] = sum0+sum1+sum2+sum3; -- svec[1] = 0.0; -- svec[2] = 0.0; -- svec[3] = 0.0; -- -+ return sum0+sum1+sum2+sum3; - } - - #endif -@@ -105,7 +101,6 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) - { - BLASLONG i=0; - FLOAT sumf = 0.0; -- FLOAT svec[4] __attribute__ ((aligned (16)));; - BLASLONG n1; - - if (n <= 0 || inc_x <= 0) return(sumf); -@@ -117,8 +112,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) - if ( n1 > 0 ) - { - -- sasum_kernel_32(n1, x, svec); -- sumf = svec[0] + svec[1]+svec[2]+svec[3]; -+ sumf = sasum_kernel_32(n1, x); - i=n1; - } - -diff --git a/kernel/power/sasum_microk_power8.c b/kernel/power/sasum_microk_power8.c -index 847fffe..25a969d 100644 ---- a/kernel/power/sasum_microk_power8.c -+++ b/kernel/power/sasum_microk_power8.c -@@ -34,144 +34,145 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - **************************************************************************************/ - - #define HAVE_KERNEL_32 1 --static void sasum_kernel_32( BLASLONG n, FLOAT *x, FLOAT *svec) __attribute__ ((noinline)); - --static void sasum_kernel_32( BLASLONG n, FLOAT *x, FLOAT *svec) -+static float sasum_kernel_32 (long n, float *x) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- BLASLONG pre = 384; -- -- __asm__ __volatile__ -- ( -- -- "dcbt %2 , %4 \n\t" -- -- "xxlxor 32,32,32 \n\t" -- "xxlxor 33,33,33 \n\t" -- "xxlxor 34,34,34 \n\t" -- "xxlxor 35,35,35 \n\t" -- "xxlxor 36,36,36 \n\t" -- "xxlxor 37,37,37 \n\t" -- "xxlxor 38,38,38 \n\t" -- "xxlxor 39,39,39 \n\t" -- -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -32 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2 , %4 \n\t" -- -- "xvabssp 48, 40 \n\t" -- "xvabssp 49, 41 \n\t" -- "xvabssp 50, 42 \n\t" -- "xvabssp 51, 43 \n\t" -- -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- -- "xvabssp 52, 44 \n\t" -- "xvabssp 53, 45 \n\t" -- -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- -- "xvabssp 54, 46 \n\t" -- "xvabssp 55, 47 \n\t" -- -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- -- "xvaddsp 32, 32, 48 \n\t" -- "xvaddsp 33, 33, 49 \n\t" -- -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- -- "xvaddsp 34, 34, 50 \n\t" -- "xvaddsp 35, 35, 51 \n\t" -- "addi %2, %2, 128 \n\t" -- "xvaddsp 36, 36, 52 \n\t" -- "xvaddsp 37, 37, 53 \n\t" -- "addic. 
%0 , %0 , -32 \n\t" -- "xvaddsp 38, 38, 54 \n\t" -- "xvaddsp 39, 39, 55 \n\t" -- -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- -- "xvabssp 48, 40 \n\t" -- "xvabssp 49, 41 \n\t" -- "xvabssp 50, 42 \n\t" -- "xvabssp 51, 43 \n\t" -- "xvabssp 52, 44 \n\t" -- "xvabssp 53, 45 \n\t" -- "xvabssp 54, 46 \n\t" -- "xvabssp 55, 47 \n\t" -- -- "xvaddsp 32, 32, 48 \n\t" -- "xvaddsp 33, 33, 49 \n\t" -- "xvaddsp 34, 34, 50 \n\t" -- "xvaddsp 35, 35, 51 \n\t" -- "xvaddsp 36, 36, 52 \n\t" -- "xvaddsp 37, 37, 53 \n\t" -- "xvaddsp 38, 38, 54 \n\t" -- "xvaddsp 39, 39, 55 \n\t" -- -- "xvaddsp 32, 32, 33 \n\t" -- "xvaddsp 34, 34, 35 \n\t" -- "xvaddsp 36, 36, 37 \n\t" -- "xvaddsp 38, 38, 39 \n\t" -- -- "xvaddsp 32, 32, 34 \n\t" -- "xvaddsp 36, 36, 38 \n\t" -- -- "xvaddsp 32, 32, 36 \n\t" -- -- -- "stxvw4x 32, 0, %3 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (n), // 1 -- "r" (x1), // 2 -- "r" (svec), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2", "memory" -- ); -- --} -- -- -+ float sum; -+ __vector float t0; -+ __vector float t1; -+ __vector float t2; -+ __vector float t3; -+ -+ __asm__ -+ ( -+ "dcbt 0, %2 \n\t" -+ -+ "xxlxor 32, 32, 32 \n\t" -+ "xxlxor 33, 33, 33 \n\t" -+ "xxlxor 34, 34, 34 \n\t" -+ "xxlxor 35, 35, 35 \n\t" -+ "xxlxor 36, 36, 36 \n\t" -+ "xxlxor 37, 37, 37 \n\t" -+ "xxlxor 38, 38, 38 \n\t" -+ "xxlxor 39, 39, 39 \n\t" -+ -+ "lxvw4x 40, 0, %2 \n\t" -+ "lxvw4x 41, %8, %2 \n\t" -+ "lxvw4x 42, %9, %2 \n\t" -+ "lxvw4x 43, %10, %2 \n\t" -+ "lxvw4x 44, %11, %2 \n\t" -+ "lxvw4x 45, %12, %2 \n\t" -+ "lxvw4x 46, %13, %2 \n\t" -+ "lxvw4x 47, %14, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. %1, %1, -32 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "xvabssp 48, 40 \n\t" -+ "xvabssp 49, 41 \n\t" -+ "xvabssp 50, 42 \n\t" -+ "xvabssp 51, 43 \n\t" -+ -+ "lxvw4x 40, 0, %2 \n\t" -+ "lxvw4x 41, %8, %2 \n\t" -+ -+ "xvabssp %x3, 44 \n\t" -+ "xvabssp %x4, 45 \n\t" -+ -+ "lxvw4x 42, %9, %2 \n\t" -+ "lxvw4x 43, %10, %2 \n\t" -+ -+ "xvabssp %x5, 46 \n\t" -+ "xvabssp %x6, 47 \n\t" -+ -+ "lxvw4x 44, %11, %2 \n\t" -+ "lxvw4x 45, %12, %2 \n\t" -+ -+ "xvaddsp 32, 32, 48 \n\t" -+ "xvaddsp 33, 33, 49 \n\t" -+ -+ "lxvw4x 46, %13, %2 \n\t" -+ "lxvw4x 47, %14, %2 \n\t" -+ -+ "xvaddsp 34, 34, 50 \n\t" -+ "xvaddsp 35, 35, 51 \n\t" -+ "addi %2, %2, 128 \n\t" -+ "xvaddsp 36, 36, %x3 \n\t" -+ "xvaddsp 37, 37, %x4 \n\t" -+ "addic. 
%1, %1, -32 \n\t" -+ "xvaddsp 38, 38, %x5 \n\t" -+ "xvaddsp 39, 39, %x6 \n\t" -+ -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvabssp 48, 40 \n\t" -+ "xvabssp 49, 41 \n\t" -+ "xvabssp 50, 42 \n\t" -+ "xvabssp 51, 43 \n\t" -+ "xvabssp %x3, 44 \n\t" -+ "xvabssp %x4, 45 \n\t" -+ "xvabssp %x5, 46 \n\t" -+ "xvabssp %x6, 47 \n\t" -+ -+ "xvaddsp 32, 32, 48 \n\t" -+ "xvaddsp 33, 33, 49 \n\t" -+ "xvaddsp 34, 34, 50 \n\t" -+ "xvaddsp 35, 35, 51 \n\t" -+ "xvaddsp 36, 36, %x3 \n\t" -+ "xvaddsp 37, 37, %x4 \n\t" -+ "xvaddsp 38, 38, %x5 \n\t" -+ "xvaddsp 39, 39, %x6 \n\t" -+ -+ "xvaddsp 32, 32, 33 \n\t" -+ "xvaddsp 34, 34, 35 \n\t" -+ "xvaddsp 36, 36, 37 \n\t" -+ "xvaddsp 38, 38, 39 \n\t" -+ -+ "xvaddsp 32, 32, 34 \n\t" -+ "xvaddsp 36, 36, 38 \n\t" -+ -+ "xvaddsp 32, 32, 36 \n\t" -+ -+ "xxsldwi 33, 32, 32, 2 \n\t" -+ "xvaddsp 32, 32, 33 \n\t" -+ -+ "xxsldwi 33, 32, 32, 1 \n\t" -+ "xvaddsp 32, 32, 33 \n\t" -+ -+ "xscvspdp %0, 32 \n" -+ -+ "#n=%1 x=%3=%2 sum=%0 o16=%8 o32=%9 o48=%10 o64=%11 o80=%12 o96=%13 o112=%14\n" -+ "#t0=%x3 t1=%x4 t2=%x5 t3=%x6" -+ : -+ "=f" (sum), // 0 -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "=wa" (t0), // 3 -+ "=wa" (t1), // 4 -+ "=wa" (t2), // 5 -+ "=wa" (t3) // 6 -+ : -+ "m" (*x), -+ "b" (16), // 8 -+ "b" (32), // 9 -+ "b" (48), // 10 -+ "b" (64), // 11 -+ "b" (80), // 12 -+ "b" (96), // 13 -+ "b" (112) // 14 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51" -+ ); -+ -+ return sum; -+} -diff --git a/kernel/power/scopy_microk_power8.c b/kernel/power/scopy_microk_power8.c -index 2e08e35..444a6d4 100644 ---- a/kernel/power/scopy_microk_power8.c -+++ b/kernel/power/scopy_microk_power8.c -@@ -35,97 +35,78 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #define HAVE_KERNEL_32 1 - --static void scopy_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) __attribute__ ((noinline)); -- --static void scopy_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) -+static void scopy_kernel_32 (long n, float *x, float *y) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *y1=y; -- BLASLONG pre = 384; -- BLASLONG alpha=0; -- -- __asm__ __volatile__ -- ( -- -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -32 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "stxvw4x 40, 0, %1 \n\t" -- "stxvw4x 41, %5, %1 \n\t" -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- "stxvw4x 42, %6, %1 \n\t" -- "stxvw4x 43, %7, %1 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- "stxvw4x 44, %8, %1 \n\t" -- "stxvw4x 45, %9, %1 \n\t" -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- "stxvw4x 46, %10, %1 \n\t" -- "stxvw4x 47, %11, %1 \n\t" -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- -- -- "addi %1, %1, 128 \n\t" -- "addi %2, %2, 128 \n\t" -- -- "addic. 
%0 , %0 , -32 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "stxvw4x 40, 0, %1 \n\t" -- "stxvw4x 41, %5, %1 \n\t" -- "stxvw4x 42, %6, %1 \n\t" -- "stxvw4x 43, %7, %1 \n\t" -- "stxvw4x 44, %8, %1 \n\t" -- "stxvw4x 45, %9, %1 \n\t" -- "stxvw4x 46, %10, %1 \n\t" -- "stxvw4x 47, %11, %1 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (y1), // 1 -- "r" (x1), // 2 -- "r" (alpha), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "memory" -- ); -- --} -- -- -+ __asm__ -+ ( -+ "lxvw4x 40, 0, %2 \n\t" -+ "lxvw4x 41, %5, %2 \n\t" -+ "lxvw4x 42, %6, %2 \n\t" -+ "lxvw4x 43, %7, %2 \n\t" -+ "lxvw4x 44, %8, %2 \n\t" -+ "lxvw4x 45, %9, %2 \n\t" -+ "lxvw4x 46, %10, %2 \n\t" -+ "lxvw4x 47, %11, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. %1, %1, -32 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "stxvw4x 40, 0, %3 \n\t" -+ "stxvw4x 41, %5, %3 \n\t" -+ "lxvw4x 40, 0, %2 \n\t" -+ "lxvw4x 41, %5, %2 \n\t" -+ "stxvw4x 42, %6, %3 \n\t" -+ "stxvw4x 43, %7, %3 \n\t" -+ "lxvw4x 42, %6, %2 \n\t" -+ "lxvw4x 43, %7, %2 \n\t" -+ "stxvw4x 44, %8, %3 \n\t" -+ "stxvw4x 45, %9, %3 \n\t" -+ "lxvw4x 44, %8, %2 \n\t" -+ "lxvw4x 45, %9, %2 \n\t" -+ "stxvw4x 46, %10, %3 \n\t" -+ "stxvw4x 47, %11, %3 \n\t" -+ "lxvw4x 46, %10, %2 \n\t" -+ "lxvw4x 47, %11, %2 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. %1, %1, -32 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "stxvw4x 40, 0, %3 \n\t" -+ "stxvw4x 41, %5, %3 \n\t" -+ "stxvw4x 42, %6, %3 \n\t" -+ "stxvw4x 43, %7, %3 \n\t" -+ "stxvw4x 44, %8, %3 \n\t" -+ "stxvw4x 45, %9, %3 \n\t" -+ "stxvw4x 46, %10, %3 \n\t" -+ "stxvw4x 47, %11, %3 \n" -+ -+ "#n=%1 x=%4=%2 y=%0=%3 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" -+ : -+ "=m" (*y), -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "+b" (y) // 3 -+ : -+ "m" (*x), -+ "b" (16), // 5 -+ "b" (32), // 6 -+ "b" (48), // 7 -+ "b" (64), // 8 -+ "b" (80), // 9 -+ "b" (96), // 10 -+ "b" (112) // 11 -+ : -+ "cr0", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47" -+ ); -+} -diff --git a/kernel/power/sdot.c b/kernel/power/sdot.c -index 52fb1fe..31f4734 100644 ---- a/kernel/power/sdot.c -+++ b/kernel/power/sdot.c -@@ -42,7 +42,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #ifndef HAVE_KERNEL_16 - --static void sdot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *d) -+static FLOAT sdot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y) - { - BLASLONG register i = 0; - FLOAT dot = 0.0; -@@ -61,8 +61,7 @@ static void sdot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *d) - i+=8 ; - - } -- *d += dot; -- -+ return dot; - } - - #endif -@@ -82,8 +81,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) - BLASLONG n1 = n & -32; - - if ( n1 ) -- sdot_kernel_16(n1, x, y , &dot ); -- -+ dot = sdot_kernel_16(n1, x, y); - - i = n1; - while(i < n) -diff --git a/kernel/power/sdot_microk_power8.c b/kernel/power/sdot_microk_power8.c -index 6dd588a..7f7ccfa 100644 ---- a/kernel/power/sdot_microk_power8.c -+++ b/kernel/power/sdot_microk_power8.c -@@ -34,146 +34,142 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- **************************************************************************************/ - - #define HAVE_KERNEL_16 1 --static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *dot) __attribute__ ((noinline)); - --static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) -+static float sdot_kernel_16 (long n, float *x, float *y) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *y1=y; -- BLASLONG pre = 384; -- FLOAT tempdot[4]; -- -- -- __asm__ __volatile__ -- ( -- "xxlxor 32,32,32 \n\t" -- "xxlxor 33,33,33 \n\t" -- "xxlxor 34,34,34 \n\t" -- "xxlxor 35,35,35 \n\t" -- "xxlxor 36,36,36 \n\t" -- "xxlxor 37,37,37 \n\t" -- "xxlxor 38,38,38 \n\t" -- "xxlxor 39,39,39 \n\t" -- -- "dcbt %2, %12 \n\t" -- "dcbt %3, %12 \n\t" -- -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 48, 0, %3 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- "lxvw4x 49, %5, %3 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 50, %6, %3 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- "lxvw4x 51, %7, %3 \n\t" -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 52, %8, %3 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- "lxvw4x 53, %9, %3 \n\t" -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 54, %10, %3 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- "lxvw4x 55, %11, %3 \n\t" -- -- "addi %2, %2, 128 \n\t" -- "addi %3, %3, 128 \n\t" -- -- "addic. %0 , %0 , -32 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2, %12 \n\t" -- "dcbt %3, %12 \n\t" -- -- "xvmaddasp 32, 40, 48 \n\t" -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 48, 0, %3 \n\t" -- "xvmaddasp 33, 41, 49 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- "lxvw4x 49, %5, %3 \n\t" -- "xvmaddasp 34, 42, 50 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 50, %6, %3 \n\t" -- "xvmaddasp 35, 43, 51 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- "lxvw4x 51, %7, %3 \n\t" -- "xvmaddasp 36, 44, 52 \n\t" -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 52, %8, %3 \n\t" -- "xvmaddasp 37, 45, 53 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- "lxvw4x 53, %9, %3 \n\t" -- "xvmaddasp 38, 46, 54 \n\t" -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 54, %10, %3 \n\t" -- "xvmaddasp 39, 47, 55 \n\t" -- -- "lxvw4x 47, %11, %2 \n\t" -- "lxvw4x 55, %11, %3 \n\t" -- -- -- "addi %2, %2, 128 \n\t" -- "addi %3, %3, 128 \n\t" -- -- "addic. 
%0 , %0 , -32 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "xvmaddasp 32, 40, 48 \n\t" -- "xvmaddasp 33, 41, 49 \n\t" -- "xvmaddasp 34, 42, 50 \n\t" -- "xvmaddasp 35, 43, 51 \n\t" -- "xvmaddasp 36, 44, 52 \n\t" -- "xvmaddasp 37, 45, 53 \n\t" -- "xvmaddasp 38, 46, 54 \n\t" -- "xvmaddasp 39, 47, 55 \n\t" -- -- "xvaddsp 32, 32 , 33 \n\t" -- "xvaddsp 34, 34 , 35 \n\t" -- "xvaddsp 36, 36 , 37 \n\t" -- "xvaddsp 38, 38 , 39 \n\t" -- -- "xvaddsp 32, 32 , 34 \n\t" -- "xvaddsp 36, 36 , 38 \n\t" -- -- "xvaddsp 32, 32 , 36 \n\t" -- -- "stxvw4x 32, 0 , %4 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (n), // 1 -- "r" (x1), // 2 -- "r" (y1), // 3 -- "r" (tempdot), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112), // 11 -- "r" (pre) // 12 -- : "cr0", "%0", "%2" , "%3", "memory" -- ); -- -- *dot = tempdot[0] + tempdot[1] + tempdot[2] + tempdot[3]; -- -- --} -- -- -+ float dot; -+ __vector float t0; -+ __vector float t1; -+ __vector float t2; -+ __vector float t3; -+ -+ __asm__ -+ ( -+ "dcbt 0, %2 \n\t" -+ "dcbt 0, %3 \n\t" -+ -+ "xxlxor 32, 32, 32 \n\t" -+ "xxlxor 33, 33, 33 \n\t" -+ "xxlxor 34, 34, 34 \n\t" -+ "xxlxor 35, 35, 35 \n\t" -+ "xxlxor 36, 36, 36 \n\t" -+ "xxlxor 37, 37, 37 \n\t" -+ "xxlxor 38, 38, 38 \n\t" -+ "xxlxor 39, 39, 39 \n\t" -+ -+ "lxvw4x 40, 0, %2 \n\t" -+ "lxvw4x 48, 0, %3 \n\t" -+ "lxvw4x 41, %10, %2 \n\t" -+ "lxvw4x 49, %10, %3 \n\t" -+ "lxvw4x 42, %11, %2 \n\t" -+ "lxvw4x 50, %11, %3 \n\t" -+ "lxvw4x 43, %12, %2 \n\t" -+ "lxvw4x 51, %12, %3 \n\t" -+ "lxvw4x 44, %13, %2 \n\t" -+ "lxvw4x %x4, %13, %3 \n\t" -+ "lxvw4x 45, %14, %2 \n\t" -+ "lxvw4x %x5, %14, %3 \n\t" -+ "lxvw4x 46, %15, %2 \n\t" -+ "lxvw4x %x6, %15, %3 \n\t" -+ "lxvw4x 47, %16, %2 \n\t" -+ "lxvw4x %x7, %16, %3 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ "addi %3, %3, 128 \n\t" -+ -+ "addic. %1, %1, -32 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "xvmaddasp 32, 40, 48 \n\t" -+ "lxvw4x 40, 0, %2 \n\t" -+ "lxvw4x 48, 0, %3 \n\t" -+ "xvmaddasp 33, 41, 49 \n\t" -+ "lxvw4x 41, %10, %2 \n\t" -+ "lxvw4x 49, %10, %3 \n\t" -+ "xvmaddasp 34, 42, 50 \n\t" -+ "lxvw4x 42, %11, %2 \n\t" -+ "lxvw4x 50, %11, %3 \n\t" -+ "xvmaddasp 35, 43, 51 \n\t" -+ "lxvw4x 43, %12, %2 \n\t" -+ "lxvw4x 51, %12, %3 \n\t" -+ "xvmaddasp 36, 44, %x4 \n\t" -+ "lxvw4x 44, %13, %2 \n\t" -+ "lxvw4x %x4, %13, %3 \n\t" -+ "xvmaddasp 37, 45, %x5 \n\t" -+ "lxvw4x 45, %14, %2 \n\t" -+ "lxvw4x %x5, %14, %3 \n\t" -+ "xvmaddasp 38, 46, %x6 \n\t" -+ "lxvw4x 46, %15, %2 \n\t" -+ "lxvw4x %x6, %15, %3 \n\t" -+ "xvmaddasp 39, 47, %x7 \n\t" -+ "lxvw4x 47, %16, %2 \n\t" -+ "lxvw4x %x7, %16, %3 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ "addi %3, %3, 128 \n\t" -+ -+ "addic. 
%1, %1, -32 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvmaddasp 32, 40, 48 \n\t" -+ "xvmaddasp 33, 41, 49 \n\t" -+ "xvmaddasp 34, 42, 50 \n\t" -+ "xvmaddasp 35, 43, 51 \n\t" -+ "xvmaddasp 36, 44, %x4 \n\t" -+ "xvmaddasp 37, 45, %x5 \n\t" -+ "xvmaddasp 38, 46, %x6 \n\t" -+ "xvmaddasp 39, 47, %x7 \n\t" -+ -+ "xvaddsp 32, 32, 33 \n\t" -+ "xvaddsp 34, 34, 35 \n\t" -+ "xvaddsp 36, 36, 37 \n\t" -+ "xvaddsp 38, 38, 39 \n\t" -+ -+ "xvaddsp 32, 32, 34 \n\t" -+ "xvaddsp 36, 36, 38 \n\t" -+ -+ "xvaddsp 32, 32, 36 \n\t" -+ -+ "xxsldwi 33, 32, 32, 2 \n\t" -+ "xvaddsp 32, 32, 33 \n\t" -+ -+ "xxsldwi 33, 32, 32, 1 \n\t" -+ "xvaddsp 32, 32, 33 \n\t" -+ -+ "xscvspdp %x0, 32 \n" -+ -+ "#dot=%0 n=%1 x=%8=%2 y=%9=%3 o16=%10 o32=%11 o48=%12 o64=%13 o80=%14 o96=%15 o122=%16\n" -+ "#t0=%x4 t1=%x5 t2=%x6 t3=%x7" -+ : -+ "=f" (dot), // 0 -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "+b" (y), // 3 -+ "=wa" (t0), // 4 -+ "=wa" (t1), // 5 -+ "=wa" (t2), // 6 -+ "=wa" (t3) // 7 -+ : -+ "m" (*x), -+ "m" (*y), -+ "b" (16), // 10 -+ "b" (32), // 11 -+ "b" (48), // 12 -+ "b" (64), // 13 -+ "b" (80), // 14 -+ "b" (96), // 15 -+ "b" (112) // 16 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51" -+ ); -+ -+ return dot; -+} -diff --git a/kernel/power/srot.c b/kernel/power/srot.c -index d464846..d2910ff 100644 ---- a/kernel/power/srot.c -+++ b/kernel/power/srot.c -@@ -46,7 +46,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #ifndef HAVE_KERNEL_16 - --static void srot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) -+static void srot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s) - { - - BLASLONG i=0; -@@ -56,8 +56,6 @@ static void srot_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) - FLOAT y00, y01, y02, y03; - FLOAT *x1=x; - FLOAT *y1=y; -- FLOAT c1=*c; -- FLOAT s1=*s; - - while ( i 0 ) - { -- c1[0]=c; -- c1[1]=c; -- c1[2]=c; -- c1[3]=c; -- s1[0]=s; -- s1[1]=s; -- s1[2]=s; -- s1[3]=s; -- srot_kernel_16(n1, x1, y1, c1, s1); -+ srot_kernel_16(n1, x1, y1, c, s); - i=n1; - } - -diff --git a/kernel/power/srot_microk_power8.c b/kernel/power/srot_microk_power8.c -index ade6550..0a18c16 100644 ---- a/kernel/power/srot_microk_power8.c -+++ b/kernel/power/srot_microk_power8.c -@@ -38,171 +38,179 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - #define HAVE_KERNEL_16 1 - --static void srot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) __attribute__ ((noinline)); -- --static void srot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *c, FLOAT *s) -+static void srot_kernel_16 (long n, float *x, float *y, float c, float s) - { -+ __vector float t0; -+ __vector float t1; -+ __vector float t2; -+ __vector float t3; -+ __vector float t4; -+ __vector float t5; -+ __vector float t6; -+ __vector float t7; - -+ __asm__ -+ ( -+ "xscvdpspn 36, %x13 \n\t" // load c to all words -+ "xxspltw 36, 36, 0 \n\t" - -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- FLOAT *x1=x; -- FLOAT *y1=y; -- FLOAT *x2=x+1; -- FLOAT *y2=y+1; -- -- __asm__ __volatile__ -- ( -- -- "lxvw4x 36 , 0, %3 \n\t" // load c -- "lxvw4x 37 , 0, %4 \n\t" // load s -- "addi %8 , %8, -4 \n\t" -- "addi %9 , %9, -4 \n\t" -- -- "lxvw4x 32, 0, %1 \n\t" // load x -- "lxvw4x 33, %5, %1 \n\t" -- "lxvw4x 34, %6, %1 \n\t" -- "lxvw4x 35, %7, %1 \n\t" -- -- "lxvw4x 40, 0, %2 \n\t" // load y -- "lxvw4x 41, %5, %2 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- -- "addi %1, %1, 64 \n\t" -- "addi %2, %2, 64 \n\t" -- -- "addic. %0 , %0 , -16 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "xvmulsp 48, 32, 36 \n\t" // c * x -- "xvmulsp 49, 33, 36 \n\t" -- "xvmulsp 50, 34, 36 \n\t" -- "xvmulsp 51, 35, 36 \n\t" -- -- "xvmulsp 56, 40, 36 \n\t" // c * y -- "xvmulsp 57, 41, 36 \n\t" -- "xvmulsp 58, 42, 36 \n\t" -- "xvmulsp 59, 43, 36 \n\t" -- -- "xvmulsp 52, 32, 37 \n\t" // s * x -- "xvmulsp 53, 33, 37 \n\t" -- -- "lxvw4x 32, 0, %1 \n\t" // load x -- "lxvw4x 33, %5, %1 \n\t" -- -- "xvmulsp 54, 34, 37 \n\t" -- "xvmulsp 55, 35, 37 \n\t" -- -- "lxvw4x 34, %6, %1 \n\t" -- "lxvw4x 35, %7, %1 \n\t" -- -- "xvmulsp 60, 40, 37 \n\t" // s * y -- "xvmulsp 61, 41, 37 \n\t" -- -- "lxvw4x 40, 0, %2 \n\t" // load y -- "lxvw4x 41, %5, %2 \n\t" -- -- "xvmulsp 62, 42, 37 \n\t" -- "xvmulsp 63, 43, 37 \n\t" -- -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- -- "xvaddsp 48, 48 , 60 \n\t" // c * x + s * y -- "xvaddsp 49, 49 , 61 \n\t" // c * x + s * y -- -- "addi %1, %1, 64 \n\t" -- "addi %2, %2, 64 \n\t" -- -- "xvaddsp 50, 50 , 62 \n\t" // c * x + s * y -- "xvaddsp 51, 51 , 63 \n\t" // c * x + s * y -- -- "xvsubsp 56, 56 , 52 \n\t" // c * y - s * x -- "xvsubsp 57, 57 , 53 \n\t" // c * y - s * x -- "xvsubsp 58, 58 , 54 \n\t" // c * y - s * x -- "xvsubsp 59, 59 , 55 \n\t" // c * y - s * x -- -- "stxvw4x 48, 0, %8 \n\t" // store x -- "stxvw4x 49, %5, %8 \n\t" -- "stxvw4x 50, %6, %8 \n\t" -- "stxvw4x 51, %7, %8 \n\t" -- -- "stxvw4x 56, 0, %9 \n\t" // store y -- "stxvw4x 57, %5, %9 \n\t" -- "stxvw4x 58, %6, %9 \n\t" -- "stxvw4x 59, %7, %9 \n\t" -- -- "addi %8, %8, 64 \n\t" -- "addi %9, %9, 64 \n\t" -- -- "addic. 
%0 , %0 , -16 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "xvmulsp 48, 32, 36 \n\t" // c * x -- "xvmulsp 49, 33, 36 \n\t" -- "xvmulsp 50, 34, 36 \n\t" -- "xvmulsp 51, 35, 36 \n\t" -- -- "xvmulsp 56, 40, 36 \n\t" // c * y -- "xvmulsp 57, 41, 36 \n\t" -- "xvmulsp 58, 42, 36 \n\t" -- "xvmulsp 59, 43, 36 \n\t" -- -- "xvmulsp 52, 32, 37 \n\t" // s * x -- "xvmulsp 53, 33, 37 \n\t" -- "xvmulsp 54, 34, 37 \n\t" -- "xvmulsp 55, 35, 37 \n\t" -- -- "xvmulsp 60, 40, 37 \n\t" // s * y -- "xvmulsp 61, 41, 37 \n\t" -- "xvmulsp 62, 42, 37 \n\t" -- "xvmulsp 63, 43, 37 \n\t" -- -- "xvaddsp 48, 48 , 60 \n\t" // c * x + s * y -- "xvaddsp 49, 49 , 61 \n\t" // c * x + s * y -- "xvaddsp 50, 50 , 62 \n\t" // c * x + s * y -- "xvaddsp 51, 51 , 63 \n\t" // c * x + s * y -- -- "xvsubsp 56, 56 , 52 \n\t" // c * y - s * x -- "xvsubsp 57, 57 , 53 \n\t" // c * y - s * x -- "xvsubsp 58, 58 , 54 \n\t" // c * y - s * x -- "xvsubsp 59, 59 , 55 \n\t" // c * y - s * x -+ "xscvdpspn 37, %x14 \n\t" // load s to all words -+ "xxspltw 37, 37, 0 \n\t" - -- "stxvw4x 48, 0, %8 \n\t" // store x -- "stxvw4x 49, %5, %8 \n\t" -- "stxvw4x 50, %6, %8 \n\t" -- "stxvw4x 51, %7, %8 \n\t" -+ "lxvw4x 32, 0, %3 \n\t" // load x -+ "lxvw4x 33, %15, %3 \n\t" -+ "lxvw4x 34, %16, %3 \n\t" -+ "lxvw4x 35, %17, %3 \n\t" - -- "stxvw4x 56, 0, %9 \n\t" // store y -- "stxvw4x 57, %5, %9 \n\t" -- "stxvw4x 58, %6, %9 \n\t" -- "stxvw4x 59, %7, %9 \n\t" -+ "lxvw4x 48, 0, %4 \n\t" // load y -+ "lxvw4x 49, %15, %4 \n\t" -+ "lxvw4x 50, %16, %4 \n\t" -+ "lxvw4x 51, %17, %4 \n\t" - -+ "addi %3, %3, 64 \n\t" -+ "addi %4, %4, 64 \n\t" - -+ "addic. %2, %2, -16 \n\t" -+ "ble 2f \n\t" - -- : -- : -- "r" (i), // 0 -- "r" (x1), // 1 -- "r" (y1), // 2 -- "r" (c), // 3 -- "r" (s), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (x2), // 8 -- "r" (y2) // 9 -- : "cr0", "%0", "%1" , "%2", "%8", "%9", "memory" -- ); -+ ".p2align 5 \n" -+ "1: \n\t" - --} -+ "xvmulsp 40, 32, 36 \n\t" // c * x -+ "xvmulsp 41, 33, 36 \n\t" -+ "xvmulsp 42, 34, 36 \n\t" -+ "xvmulsp 43, 35, 36 \n\t" - -+ "xvmulsp %x5, 48, 36 \n\t" // c * y -+ "xvmulsp %x6, 49, 36 \n\t" -+ "xvmulsp %x7, 50, 36 \n\t" -+ "xvmulsp %x8, 51, 36 \n\t" - -+ "xvmulsp 44, 32, 37 \n\t" // s * x -+ "xvmulsp 45, 33, 37 \n\t" -+ -+ "lxvw4x 32, 0, %3 \n\t" // load x -+ "lxvw4x 33, %15, %3 \n\t" -+ -+ "xvmulsp 46, 34, 37 \n\t" -+ "xvmulsp 47, 35, 37 \n\t" -+ -+ "lxvw4x 34, %16, %3 \n\t" -+ "lxvw4x 35, %17, %3 \n\t" -+ -+ "xvmulsp %x9, 48, 37 \n\t" // s * y -+ "xvmulsp %x10, 49, 37 \n\t" -+ -+ "lxvw4x 48, 0, %4 \n\t" // load y -+ "lxvw4x 49, %15, %4 \n\t" -+ -+ "xvmulsp %x11, 50, 37 \n\t" -+ "xvmulsp %x12, 51, 37 \n\t" -+ -+ "lxvw4x 50, %16, %4 \n\t" -+ "lxvw4x 51, %17, %4 \n\t" -+ -+ "xvaddsp 40, 40, %x9 \n\t" // c * x + s * y -+ "xvaddsp 41, 41, %x10 \n\t" // c * x + s * y -+ -+ "addi %3, %3, -64 \n\t" -+ "addi %4, %4, -64 \n\t" -+ -+ "xvaddsp 42, 42, %x11 \n\t" // c * x + s * y -+ "xvaddsp 43, 43, %x12 \n\t" // c * x + s * y -+ -+ "xvsubsp %x5, %x5, 44 \n\t" // c * y - s * x -+ "xvsubsp %x6, %x6, 45 \n\t" // c * y - s * x -+ "xvsubsp %x7, %x7, 46 \n\t" // c * y - s * x -+ "xvsubsp %x8, %x8, 47 \n\t" // c * y - s * x -+ -+ "stxvw4x 40, 0, %3 \n\t" // store x -+ "stxvw4x 41, %15, %3 \n\t" -+ "stxvw4x 42, %16, %3 \n\t" -+ "stxvw4x 43, %17, %3 \n\t" -+ -+ "stxvw4x %x5, 0, %4 \n\t" // store y -+ "stxvw4x %x6, %15, %4 \n\t" -+ "stxvw4x %x7, %16, %4 \n\t" -+ "stxvw4x %x8, %17, %4 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ "addi %4, %4, 128 \n\t" -+ -+ "addic. 
%2, %2, -16 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvmulsp 40, 32, 36 \n\t" // c * x -+ "xvmulsp 41, 33, 36 \n\t" -+ "xvmulsp 42, 34, 36 \n\t" -+ "xvmulsp 43, 35, 36 \n\t" -+ -+ "xvmulsp %x5, 48, 36 \n\t" // c * y -+ "xvmulsp %x6, 49, 36 \n\t" -+ "xvmulsp %x7, 50, 36 \n\t" -+ "xvmulsp %x8, 51, 36 \n\t" -+ -+ "xvmulsp 44, 32, 37 \n\t" // s * x -+ "xvmulsp 45, 33, 37 \n\t" -+ "xvmulsp 46, 34, 37 \n\t" -+ "xvmulsp 47, 35, 37 \n\t" -+ -+ "xvmulsp %x9, 48, 37 \n\t" // s * y -+ "xvmulsp %x10, 49, 37 \n\t" -+ "xvmulsp %x11, 50, 37 \n\t" -+ "xvmulsp %x12, 51, 37 \n\t" -+ -+ "addi %3, %3, -64 \n\t" -+ "addi %4, %4, -64 \n\t" -+ -+ "xvaddsp 40, 40, %x9 \n\t" // c * x + s * y -+ "xvaddsp 41, 41, %x10 \n\t" // c * x + s * y -+ "xvaddsp 42, 42, %x11 \n\t" // c * x + s * y -+ "xvaddsp 43, 43, %x12 \n\t" // c * x + s * y -+ -+ "xvsubsp %x5, %x5, 44 \n\t" // c * y - s * x -+ "xvsubsp %x6, %x6, 45 \n\t" // c * y - s * x -+ "xvsubsp %x7, %x7, 46 \n\t" // c * y - s * x -+ "xvsubsp %x8, %x8, 47 \n\t" // c * y - s * x -+ -+ "stxvw4x 40, 0, %3 \n\t" // store x -+ "stxvw4x 41, %15, %3 \n\t" -+ "stxvw4x 42, %16, %3 \n\t" -+ "stxvw4x 43, %17, %3 \n\t" -+ -+ "stxvw4x %x5, 0, %4 \n\t" // store y -+ "stxvw4x %x6, %15, %4 \n\t" -+ "stxvw4x %x7, %16, %4 \n\t" -+ "stxvw4x %x8, %17, %4 \n" -+ -+ "#n=%2 x=%0=%3 y=%1=%4 c=%13 s=%14 o16=%15 o32=%16 o48=%17\n" -+ "#t0=%x5 t1=%x6 t2=%x7 t3=%x8 t4=%x9 t5=%x10 t6=%x11 t7=%x12" -+ : -+ "+m" (*x), -+ "+m" (*y), -+ "+r" (n), // 2 -+ "+b" (x), // 3 -+ "+b" (y), // 4 -+ "=wa" (t0), // 5 -+ "=wa" (t1), // 6 -+ "=wa" (t2), // 7 -+ "=wa" (t3), // 8 -+ "=wa" (t4), // 9 -+ "=wa" (t5), // 10 -+ "=wa" (t6), // 11 -+ "=wa" (t7) // 12 -+ : -+ "f" (c), // 13 -+ "f" (s), // 14 -+ "b" (16), // 15 -+ "b" (32), // 16 -+ "b" (48) // 17 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51" -+ ); -+} -diff --git a/kernel/power/sscal.c b/kernel/power/sscal.c -index c6ef5e9..bd5cdc4 100644 ---- a/kernel/power/sscal.c -+++ b/kernel/power/sscal.c -@@ -42,11 +42,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #if !defined(HAVE_KERNEL_16) - --static void sscal_kernel_16( BLASLONG n, FLOAT *da , FLOAT *x ) -+static void sscal_kernel_16 (BLASLONG n, FLOAT *x, FLOAT alpha) - { - - BLASLONG i; -- FLOAT alpha = *da; - - for( i=0; i 0 ) - { -- alpha[0]=da; -- alpha[1]=da; -- alpha[2]=da; -- alpha[3]=da; -- sscal_kernel_16_zero(n1 , alpha , x); -+ sscal_kernel_16_zero(n1, x); - j=n1; - } - -@@ -127,11 +121,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS - BLASLONG n1 = n & -32; - if ( n1 > 0 ) - { -- alpha[0]=da; -- alpha[1]=da; -- alpha[2]=da; -- alpha[3]=da; -- sscal_kernel_16(n1 , alpha , x); -+ sscal_kernel_16(n1, x, da); - j=n1; - } - while(j < n) -diff --git a/kernel/power/sscal_microk_power8.c b/kernel/power/sscal_microk_power8.c -index 963cec7..49862a3 100644 ---- a/kernel/power/sscal_microk_power8.c -+++ b/kernel/power/sscal_microk_power8.c -@@ -35,184 +35,150 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - #define HAVE_KERNEL_16 1 - --static void sscal_kernel_16( BLASLONG n, FLOAT *alpha, FLOAT *x) __attribute__ ((noinline)); -- --static void sscal_kernel_16( BLASLONG n, FLOAT *alpha, FLOAT *x) -+static void sscal_kernel_16 (long n, float *x, float alpha) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *x2=x+1; -- BLASLONG pre = 384; -- -- __asm__ __volatile__ -- ( -- -- "lxvw4x 32, 0, %3 \n\t" -- "addi %1, %1, -4 \n\t" -- -- "dcbt %2, %4 \n\t" -- -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -32 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2, %4 \n\t" -- -- "xvmulsp 48, 40, 32 \n\t" -- "xvmulsp 49, 41, 32 \n\t" -- "lxvw4x 40, 0, %2 \n\t" -- "lxvw4x 41, %5, %2 \n\t" -- "xvmulsp 50, 42, 32 \n\t" -- "xvmulsp 51, 43, 32 \n\t" -- "lxvw4x 42, %6, %2 \n\t" -- "lxvw4x 43, %7, %2 \n\t" -- "xvmulsp 52, 44, 32 \n\t" -- "xvmulsp 53, 45, 32 \n\t" -- "lxvw4x 44, %8, %2 \n\t" -- "lxvw4x 45, %9, %2 \n\t" -- "xvmulsp 54, 46, 32 \n\t" -- "xvmulsp 55, 47, 32 \n\t" -- "lxvw4x 46, %10, %2 \n\t" -- "lxvw4x 47, %11, %2 \n\t" -- -- "stxvw4x 48, 0, %1 \n\t" -- "stxvw4x 49, %5, %1 \n\t" -- "stxvw4x 50, %6, %1 \n\t" -- "stxvw4x 51, %7, %1 \n\t" -- "stxvw4x 52, %8, %1 \n\t" -- "stxvw4x 53, %9, %1 \n\t" -- "stxvw4x 54, %10, %1 \n\t" -- "stxvw4x 55, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -32 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "xvmulsp 48, 40, 32 \n\t" -- "xvmulsp 49, 41, 32 \n\t" -- "xvmulsp 50, 42, 32 \n\t" -- "xvmulsp 51, 43, 32 \n\t" -- "xvmulsp 52, 44, 32 \n\t" -- "xvmulsp 53, 45, 32 \n\t" -- "xvmulsp 54, 46, 32 \n\t" -- "xvmulsp 55, 47, 32 \n\t" -- -- "stxvw4x 48, 0, %1 \n\t" -- "stxvw4x 49, %5, %1 \n\t" -- "stxvw4x 50, %6, %1 \n\t" -- "stxvw4x 51, %7, %1 \n\t" -- "stxvw4x 52, %8, %1 \n\t" -- "stxvw4x 53, %9, %1 \n\t" -- "stxvw4x 54, %10, %1 \n\t" -- "stxvw4x 55, %11, %1 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (x2), // 1 -- "r" (x1), // 2 -- "r" (alpha), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "memory" -- ); -- --} -- -- --static void sscal_kernel_16_zero( BLASLONG n, FLOAT *alpha, FLOAT *x) __attribute__ ((noinline)); -- --static void sscal_kernel_16_zero( BLASLONG n, FLOAT *alpha, FLOAT *x) -+ __asm__ -+ ( -+ "dcbt 0, %2 \n\t" -+ -+ "xscvdpspn %x3, %x3 \n\t" -+ "xxspltw %x3, %x3, 0 \n\t" -+ -+ "lxvw4x 32, 0, %2 \n\t" -+ "lxvw4x 33, %4, %2 \n\t" -+ "lxvw4x 34, %5, %2 \n\t" -+ "lxvw4x 35, %6, %2 \n\t" -+ "lxvw4x 36, %7, %2 \n\t" -+ "lxvw4x 37, %8, %2 \n\t" -+ "lxvw4x 38, %9, %2 \n\t" -+ "lxvw4x 39, %10, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. 
%1, %1, -32 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "xvmulsp 40, 32, %x3 \n\t" -+ "xvmulsp 41, 33, %x3 \n\t" -+ "lxvw4x 32, 0, %2 \n\t" -+ "lxvw4x 33, %4, %2 \n\t" -+ "xvmulsp 42, 34, %x3 \n\t" -+ "xvmulsp 43, 35, %x3 \n\t" -+ "lxvw4x 34, %5, %2 \n\t" -+ "lxvw4x 35, %6, %2 \n\t" -+ "xvmulsp 44, 36, %x3 \n\t" -+ "xvmulsp 45, 37, %x3 \n\t" -+ "lxvw4x 36, %7, %2 \n\t" -+ "lxvw4x 37, %8, %2 \n\t" -+ "xvmulsp 46, 38, %x3 \n\t" -+ "xvmulsp 47, 39, %x3 \n\t" -+ "lxvw4x 38, %9, %2 \n\t" -+ "lxvw4x 39, %10, %2 \n\t" -+ -+ "addi %2, %2, -128 \n\t" -+ -+ "stxvw4x 40, 0, %2 \n\t" -+ "stxvw4x 41, %4, %2 \n\t" -+ "stxvw4x 42, %5, %2 \n\t" -+ "stxvw4x 43, %6, %2 \n\t" -+ "stxvw4x 44, %7, %2 \n\t" -+ "stxvw4x 45, %8, %2 \n\t" -+ "stxvw4x 46, %9, %2 \n\t" -+ "stxvw4x 47, %10, %2 \n\t" -+ -+ "addi %2, %2, 256 \n\t" -+ -+ "addic. %1, %1, -32 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvmulsp 40, 32, %x3 \n\t" -+ "xvmulsp 41, 33, %x3 \n\t" -+ "xvmulsp 42, 34, %x3 \n\t" -+ "xvmulsp 43, 35, %x3 \n\t" -+ -+ "addi %2, %2, -128 \n\t" -+ -+ "xvmulsp 44, 36, %x3 \n\t" -+ "xvmulsp 45, 37, %x3 \n\t" -+ "xvmulsp 46, 38, %x3 \n\t" -+ "xvmulsp 47, 39, %x3 \n\t" -+ -+ "stxvw4x 40, 0, %2 \n\t" -+ "stxvw4x 41, %4, %2 \n\t" -+ "stxvw4x 42, %5, %2 \n\t" -+ "stxvw4x 43, %6, %2 \n\t" -+ "stxvw4x 44, %7, %2 \n\t" -+ "stxvw4x 45, %8, %2 \n\t" -+ "stxvw4x 46, %9, %2 \n\t" -+ "stxvw4x 47, %10, %2 \n" -+ -+ "#n=%1 alpha=%3 x=%0=%2 o16=%4 o32=%5 o48=%6 o64=%7 o80=%8 o96=%9 o112=%10" -+ : -+ "+m" (*x), -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "+f" (alpha) // 3 -+ : -+ "b" (16), // 4 -+ "b" (32), // 5 -+ "b" (48), // 6 -+ "b" (64), // 7 -+ "b" (80), // 8 -+ "b" (96), // 9 -+ "b" (112) // 10 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47" -+ ); -+} -+ -+ -+static void sscal_kernel_16_zero (long n, float *x) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *x2=x+1; -- BLASLONG pre = 384; -- -- __asm__ __volatile__ -- ( -- -- "xxlxor 32 , 32 , 32 \n\t" -- "addi %1, %1, -4 \n\t" -- -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "stxvw4x 32, 0, %1 \n\t" -- "stxvw4x 32, %5, %1 \n\t" -- "stxvw4x 32, %6, %1 \n\t" -- "stxvw4x 32, %7, %1 \n\t" -- "stxvw4x 32, %8, %1 \n\t" -- "stxvw4x 32, %9, %1 \n\t" -- "stxvw4x 32, %10, %1 \n\t" -- "stxvw4x 32, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "addic. %0 , %0 , -32 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (x2), // 1 -- "r" (x1), // 2 -- "r" (alpha), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "memory" -- ); -- --} -- -- -+ __vector float t0; -+ -+ __asm__ -+ ( -+ "xxlxor %x3, %x3, %x3 \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "stxvw4x %x3, 0, %2 \n\t" -+ "stxvw4x %x3, %4, %2 \n\t" -+ "stxvw4x %x3, %5, %2 \n\t" -+ "stxvw4x %x3, %6, %2 \n\t" -+ "stxvw4x %x3, %7, %2 \n\t" -+ "stxvw4x %x3, %8, %2 \n\t" -+ "stxvw4x %x3, %9, %2 \n\t" -+ "stxvw4x %x3, %10, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. 
%1, %1, -32 \n\t" -+ "bgt 1b \n" -+ -+ "#n=%1 x=%0=%2 t0=%x3 o16=%4 o32=%5 o48=%6 o64=%7 o80=%8 o96=%9 o112=%10" -+ : -+ "=m" (*x), -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "=wa" (t0) // 3 -+ : -+ "b" (16), // 4 -+ "b" (32), // 5 -+ "b" (48), // 6 -+ "b" (64), // 7 -+ "b" (80), // 8 -+ "b" (96), // 9 -+ "b" (112) // 10 -+ : -+ "cr0" -+ ); -+} -diff --git a/kernel/power/sswap_microk_power8.c b/kernel/power/sswap_microk_power8.c -index c48e743..d44f167 100644 ---- a/kernel/power/sswap_microk_power8.c -+++ b/kernel/power/sswap_microk_power8.c -@@ -35,102 +35,74 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #define HAVE_KERNEL_32 1 - --static void sswap_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) __attribute__ ((noinline)); -- --static void sswap_kernel_32( BLASLONG n, FLOAT *x, FLOAT *y) -+static void sswap_kernel_32 (long n, float *x, float *y) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *y1=y; -- FLOAT *x2=x+1; -- FLOAT *y2=y+1; -- BLASLONG pre = 384; -- BLASLONG alpha=0; -- -- __asm__ __volatile__ -- ( -- -- "addi %3, %3, -4 \n\t" -- "addi %4, %4, -4 \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "lxvw4x 32, 0, %2 \n\t" -- "lxvw4x 33, %5, %2 \n\t" -- "lxvw4x 34, %6, %2 \n\t" -- "lxvw4x 35, %7, %2 \n\t" -- "lxvw4x 36, %8, %2 \n\t" -- "lxvw4x 37, %9, %2 \n\t" -- "lxvw4x 38, %10, %2 \n\t" -- "lxvw4x 39, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "lxvw4x 48, 0, %1 \n\t" -- "lxvw4x 49, %5, %1 \n\t" -- "lxvw4x 50, %6, %1 \n\t" -- "lxvw4x 51, %7, %1 \n\t" -- "lxvw4x 52, %8, %1 \n\t" -- "lxvw4x 53, %9, %1 \n\t" -- "lxvw4x 54, %10, %1 \n\t" -- "lxvw4x 55, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "stxvw4x 32, 0, %3 \n\t" -- "stxvw4x 33, %5, %3 \n\t" -- "stxvw4x 34, %6, %3 \n\t" -- "stxvw4x 35, %7, %3 \n\t" -- "stxvw4x 36, %8, %3 \n\t" -- "stxvw4x 37, %9, %3 \n\t" -- "stxvw4x 38, %10, %3 \n\t" -- "stxvw4x 39, %11, %3 \n\t" -- -- "addi %3, %3, 128 \n\t" -- -- "stxvw4x 48, 0, %4 \n\t" -- "stxvw4x 49, %5, %4 \n\t" -- "stxvw4x 50, %6, %4 \n\t" -- "stxvw4x 51, %7, %4 \n\t" -- "stxvw4x 52, %8, %4 \n\t" -- "stxvw4x 53, %9, %4 \n\t" -- "stxvw4x 54, %10, %4 \n\t" -- "stxvw4x 55, %11, %4 \n\t" -- -- "addi %4, %4, 128 \n\t" -- -- "addic. 
%0 , %0 , -32 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (y1), // 1 -- "r" (x1), // 2 -- "r" (y2), // 3 -- "r" (x2), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "%3", "%4", "memory" -- ); -- --} -- -- -+ __asm__ -+ ( -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "lxvw4x 32, 0, %4 \n\t" -+ "lxvw4x 33, %5, %4 \n\t" -+ "lxvw4x 34, %6, %4 \n\t" -+ "lxvw4x 35, %7, %4 \n\t" -+ "lxvw4x 36, %8, %4 \n\t" -+ "lxvw4x 37, %9, %4 \n\t" -+ "lxvw4x 38, %10, %4 \n\t" -+ "lxvw4x 39, %11, %4 \n\t" -+ -+ "lxvw4x 40, 0, %3 \n\t" -+ "lxvw4x 41, %5, %3 \n\t" -+ "lxvw4x 42, %6, %3 \n\t" -+ "lxvw4x 43, %7, %3 \n\t" -+ "lxvw4x 44, %8, %3 \n\t" -+ "lxvw4x 45, %9, %3 \n\t" -+ "lxvw4x 46, %10, %3 \n\t" -+ "lxvw4x 47, %11, %3 \n\t" -+ -+ "stxvw4x 32, 0, %3 \n\t" -+ "stxvw4x 33, %5, %3 \n\t" -+ "stxvw4x 34, %6, %3 \n\t" -+ "stxvw4x 35, %7, %3 \n\t" -+ "stxvw4x 36, %8, %3 \n\t" -+ "stxvw4x 37, %9, %3 \n\t" -+ "stxvw4x 38, %10, %3 \n\t" -+ "stxvw4x 39, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "stxvw4x 40, 0, %4 \n\t" -+ "stxvw4x 41, %5, %4 \n\t" -+ "stxvw4x 42, %6, %4 \n\t" -+ "stxvw4x 43, %7, %4 \n\t" -+ "stxvw4x 44, %8, %4 \n\t" -+ "stxvw4x 45, %9, %4 \n\t" -+ "stxvw4x 46, %10, %4 \n\t" -+ "stxvw4x 47, %11, %4 \n\t" -+ -+ "addi %4, %4, 128 \n\t" -+ -+ "addic. %2, %2, -32 \n\t" -+ "bgt 1b \n" -+ -+ "#n=%2 x=%0=%3 y=%1=%4 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" -+ : -+ "+m" (*x), -+ "+m" (*y), -+ "+r" (n), // 2 -+ "+b" (x), // 3 -+ "+b" (y) // 4 -+ : -+ "b" (16), // 5 -+ "b" (32), // 6 -+ "b" (48), // 7 -+ "b" (64), // 8 -+ "b" (80), // 9 -+ "b" (96), // 10 -+ "b" (112) // 11 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47" -+ ); -+} -diff --git a/kernel/power/zasum.c b/kernel/power/zasum.c -index abd6ec0..0b6b87d 100644 ---- a/kernel/power/zasum.c -+++ b/kernel/power/zasum.c -@@ -53,7 +53,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #ifndef HAVE_KERNEL_8 - --static void zasum_kernel_8(BLASLONG n, FLOAT *x1, FLOAT *svec) -+static FLOAT zasum_kernel_8(BLASLONG n, FLOAT *x1) - { - - BLASLONG i=0; -@@ -92,9 +92,7 @@ static void zasum_kernel_8(BLASLONG n, FLOAT *x1, FLOAT *svec) - - } - -- svec[0] = sum0+sum1+sum2+sum3; -- svec[1] = 0.0; -- -+ return sum0+sum1+sum2+sum3; - } - - #endif -@@ -104,7 +102,6 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) - BLASLONG i=0; - BLASLONG ip=0; - FLOAT sumf = 0.0; -- FLOAT svec[2] __attribute__ ((aligned (16)));; - BLASLONG n1; - BLASLONG inc_x2; - -@@ -117,8 +114,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) - if ( n1 > 0 ) - { - -- zasum_kernel_8(n1, x, svec); -- sumf = svec[0] + svec[1]; -+ sumf = zasum_kernel_8(n1, x); - i=n1; - ip=2*n1; - } -diff --git a/kernel/power/zasum_microk_power8.c b/kernel/power/zasum_microk_power8.c -index b9f6c0a..8236690 100644 ---- a/kernel/power/zasum_microk_power8.c -+++ b/kernel/power/zasum_microk_power8.c -@@ -34,144 +34,140 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- **************************************************************************************/ - - #define HAVE_KERNEL_8 1 --static void zasum_kernel_8( BLASLONG n, FLOAT *x, FLOAT *svec) __attribute__ ((noinline)); - --static void zasum_kernel_8( BLASLONG n, FLOAT *x, FLOAT *svec) -+static double zasum_kernel_8 (long n, double *x) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- BLASLONG pre = 384; -- -- __asm__ __volatile__ -- ( -- -- "dcbt %2 , %4 \n\t" -- -- "xxlxor 32,32,32 \n\t" -- "xxlxor 33,33,33 \n\t" -- "xxlxor 34,34,34 \n\t" -- "xxlxor 35,35,35 \n\t" -- "xxlxor 36,36,36 \n\t" -- "xxlxor 37,37,37 \n\t" -- "xxlxor 38,38,38 \n\t" -- "xxlxor 39,39,39 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -8 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2 , %4 \n\t" -- -- "xvabsdp 48, 40 \n\t" -- "xvabsdp 49, 41 \n\t" -- "xvabsdp 50, 42 \n\t" -- "xvabsdp 51, 43 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- -- "xvabsdp 52, 44 \n\t" -- "xvabsdp 53, 45 \n\t" -- -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- -- "xvabsdp 54, 46 \n\t" -- "xvabsdp 55, 47 \n\t" -- -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- -- "xvadddp 32, 32, 48 \n\t" -- "xvadddp 33, 33, 49 \n\t" -- -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "xvadddp 34, 34, 50 \n\t" -- "xvadddp 35, 35, 51 \n\t" -- "addi %2, %2, 128 \n\t" -- "xvadddp 36, 36, 52 \n\t" -- "xvadddp 37, 37, 53 \n\t" -- "addic. 
%0 , %0 , -8 \n\t" -- "xvadddp 38, 38, 54 \n\t" -- "xvadddp 39, 39, 55 \n\t" -- -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- -- "xvabsdp 48, 40 \n\t" -- "xvabsdp 49, 41 \n\t" -- "xvabsdp 50, 42 \n\t" -- "xvabsdp 51, 43 \n\t" -- "xvabsdp 52, 44 \n\t" -- "xvabsdp 53, 45 \n\t" -- "xvabsdp 54, 46 \n\t" -- "xvabsdp 55, 47 \n\t" -- -- "xvadddp 32, 32, 48 \n\t" -- "xvadddp 33, 33, 49 \n\t" -- "xvadddp 34, 34, 50 \n\t" -- "xvadddp 35, 35, 51 \n\t" -- "xvadddp 36, 36, 52 \n\t" -- "xvadddp 37, 37, 53 \n\t" -- "xvadddp 38, 38, 54 \n\t" -- "xvadddp 39, 39, 55 \n\t" -- -- "xvadddp 32, 32, 33 \n\t" -- "xvadddp 34, 34, 35 \n\t" -- "xvadddp 36, 36, 37 \n\t" -- "xvadddp 38, 38, 39 \n\t" -- -- "xvadddp 32, 32, 34 \n\t" -- "xvadddp 36, 36, 38 \n\t" -- -- "xvadddp 32, 32, 36 \n\t" -- -- -- "stxvd2x 32, 0, %3 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (n), // 1 -- "r" (x1), // 2 -- "r" (svec), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2", "memory" -- ); -- --} -- -- -+ double sum; -+ __vector double t0; -+ __vector double t1; -+ __vector double t2; -+ __vector double t3; -+ -+ __asm__ -+ ( -+ "dcbt 0, %2 \n\t" -+ -+ "xxlxor 32, 32, 32 \n\t" -+ "xxlxor 33, 33, 33 \n\t" -+ "xxlxor 34, 34, 34 \n\t" -+ "xxlxor 35, 35, 35 \n\t" -+ "xxlxor 36, 36, 36 \n\t" -+ "xxlxor 37, 37, 37 \n\t" -+ "xxlxor 38, 38, 38 \n\t" -+ "xxlxor 39, 39, 39 \n\t" -+ -+ "lxvd2x 40, 0, %2 \n\t" -+ "lxvd2x 41, %8, %2 \n\t" -+ "lxvd2x 42, %9, %2 \n\t" -+ "lxvd2x 43, %10, %2 \n\t" -+ "lxvd2x 44, %11, %2 \n\t" -+ "lxvd2x 45, %12, %2 \n\t" -+ "lxvd2x 46, %13, %2 \n\t" -+ "lxvd2x 47, %14, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. %1, %1, -8 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "xvabsdp 48, 40 \n\t" -+ "xvabsdp 49, 41 \n\t" -+ "xvabsdp 50, 42 \n\t" -+ "xvabsdp 51, 43 \n\t" -+ -+ "lxvd2x 40, 0, %2 \n\t" -+ "lxvd2x 41, %8, %2 \n\t" -+ -+ "xvabsdp %x3, 44 \n\t" -+ "xvabsdp %x4, 45 \n\t" -+ -+ "lxvd2x 42, %9, %2 \n\t" -+ "lxvd2x 43, %10, %2 \n\t" -+ -+ "xvabsdp %x5, 46 \n\t" -+ "xvabsdp %x6, 47 \n\t" -+ -+ "lxvd2x 44, %11, %2 \n\t" -+ "lxvd2x 45, %12, %2 \n\t" -+ -+ "xvadddp 32, 32, 48 \n\t" -+ "xvadddp 33, 33, 49 \n\t" -+ -+ "lxvd2x 46, %13, %2 \n\t" -+ "lxvd2x 47, %14, %2 \n\t" -+ -+ "xvadddp 34, 34, 50 \n\t" -+ "xvadddp 35, 35, 51 \n\t" -+ "addi %2, %2, 128 \n\t" -+ "xvadddp 36, 36, %x3 \n\t" -+ "xvadddp 37, 37, %x4 \n\t" -+ "addic. 
%1, %1, -8 \n\t" -+ "xvadddp 38, 38, %x5 \n\t" -+ "xvadddp 39, 39, %x6 \n\t" -+ -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvabsdp 48, 40 \n\t" -+ "xvabsdp 49, 41 \n\t" -+ "xvabsdp 50, 42 \n\t" -+ "xvabsdp 51, 43 \n\t" -+ "xvabsdp %x3, 44 \n\t" -+ "xvabsdp %x4, 45 \n\t" -+ "xvabsdp %x5, 46 \n\t" -+ "xvabsdp %x6, 47 \n\t" -+ -+ "xvadddp 32, 32, 48 \n\t" -+ "xvadddp 33, 33, 49 \n\t" -+ "xvadddp 34, 34, 50 \n\t" -+ "xvadddp 35, 35, 51 \n\t" -+ "xvadddp 36, 36, %x3 \n\t" -+ "xvadddp 37, 37, %x4 \n\t" -+ "xvadddp 38, 38, %x5 \n\t" -+ "xvadddp 39, 39, %x6 \n\t" -+ -+ "xvadddp 32, 32, 33 \n\t" -+ "xvadddp 34, 34, 35 \n\t" -+ "xvadddp 36, 36, 37 \n\t" -+ "xvadddp 38, 38, 39 \n\t" -+ -+ "xvadddp 32, 32, 34 \n\t" -+ "xvadddp 36, 36, 38 \n\t" -+ -+ "xvadddp 32, 32, 36 \n\t" -+ -+ "xxswapd 33, 32 \n\t" -+ "xsadddp %x0, 32, 33 \n" -+ -+ "#n=%1 x=%3=%2 sum=%0 o16=%8 o32=%9 o48=%10 o64=%11 o80=%12 o96=%13 o112=%14\n" -+ "#t0=%x3 t1=%x4 t2=%x5 t3=%x6" -+ : -+ "=d" (sum), // 0 -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "=wa" (t0), // 3 -+ "=wa" (t1), // 4 -+ "=wa" (t2), // 5 -+ "=wa" (t3) // 6 -+ : -+ "m" (*x), -+ "b" (16), // 8 -+ "b" (32), // 9 -+ "b" (48), // 10 -+ "b" (64), // 11 -+ "b" (80), // 12 -+ "b" (96), // 13 -+ "b" (112) // 14 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51" -+ ); -+ -+ return sum; -+} -diff --git a/kernel/power/zaxpy.c b/kernel/power/zaxpy.c -index 0ee0c1b..dd7ab6c 100644 ---- a/kernel/power/zaxpy.c -+++ b/kernel/power/zaxpy.c -@@ -78,7 +78,6 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, - { - BLASLONG i=0; - BLASLONG ix=0,iy=0; -- FLOAT da[4]; - - if ( n <= 0 ) return(0); - -@@ -89,11 +88,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, - - if ( n1 ) - { -- da[0] = da_r; -- da[1] = da_r; -- da[2] = da_i; -- da[3] = da_i; -- zaxpy_kernel_4(n1, x, y , da ); -+ zaxpy_kernel_4 (n1, x, y, da_r, da_i); - ix = 2 * n1; - } - i = n1; -diff --git a/kernel/power/zaxpy_microk_power8.c b/kernel/power/zaxpy_microk_power8.c -index c8a529f..124614f 100644 ---- a/kernel/power/zaxpy_microk_power8.c -+++ b/kernel/power/zaxpy_microk_power8.c -@@ -35,216 +35,225 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - - #define HAVE_KERNEL_4 1 --static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *alpha) __attribute__ ((noinline)); -- --static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) -+static void zaxpy_kernel_4 (long n, double *x, double *y, -+ double alpha_r, double alpha_i) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- FLOAT *x1=x; -- FLOAT *y1=y; -- FLOAT *y2=y+1; -- BLASLONG pre = 384; -- - #if !defined(CONJ) -- FLOAT mvec[2] = { -1.0, 1.0 }; -+ static const double mvec[2] = { -1.0, 1.0 }; - #else -- FLOAT mvec[2] = { 1.0, -1.0 }; -+ static const double mvec[2] = { 1.0, -1.0 }; - #endif -- -- -- __asm__ __volatile__ -- ( -- -- "lxsdx 34, 0 , %4 \n\t" // alpha_r -- "lxsdx 35, %5, %4 \n\t" // alpha_i -- "xxspltd 32, 34, 0 \n\t" -- "xxspltd 33, 35, 0 \n\t" -- -- "lxvd2x 36, 0, %9 \n\t" // mvec -+ const double *mvecp = mvec; -+ -+ __vector double t0; -+ __vector double t1; -+ __vector double t2; -+ __vector double t3; -+ __vector double t4; -+ __vector double t5; -+ __vector double t6; -+ __vector double t7; -+ __vector double t8; -+ __vector double t9; -+ __vector double t10; -+ __vector double t11; -+ long ytmp; -+ -+ __asm__ -+ ( -+ "xxspltd 32, %x19, 0 \n\t" // alpha_r -+ "xxspltd 33, %x20, 0 \n\t" // alpha_i -+ -+ "lxvd2x 36, 0, %21 \n\t" // mvec - - #if !defined(CONJ) -- "xvmuldp 33, 33 , 36 \n\t" // alpha_i * mvec -+ "xvmuldp 33, 33, 36 \n\t" // alpha_i * mvec - #else -- "xvmuldp 32, 32 , 36 \n\t" // alpha_r * mvec -+ "xvmuldp 32, 32, 36 \n\t" // alpha_r * mvec - #endif - -- "addi %8, %8, -8 \n\t" -- -- "dcbt %2, %10 \n\t" -- "dcbt %3, %10 \n\t" -- -- -- "lxvd2x 40, 0, %2 \n\t" // x0 -- "lxvd2x 41, %5, %2 \n\t" // x1 -- "lxvd2x 42, %6, %2 \n\t" // x2 -- "lxvd2x 43, %7, %2 \n\t" // x3 -- -- "lxvd2x 48, 0, %3 \n\t" // y0 -- "lxvd2x 49, %5, %3 \n\t" // y1 -- "lxvd2x 50, %6, %3 \n\t" // y2 -- "lxvd2x 51, %7, %3 \n\t" // y3 -- -- "xxswapd 56, 40 \n\t" // exchange real and imag part -- "xxswapd 57, 41 \n\t" // exchange real and imag part -- "xxswapd 58, 42 \n\t" // exchange real and imag part -- "xxswapd 59, 43 \n\t" // exchange real and imag part -- -- "addi %2, %2, 64 \n\t" -- "addi %3, %3, 64 \n\t" -- -- "lxvd2x 44, 0, %2 \n\t" // x4 -- "lxvd2x 45, %5, %2 \n\t" // x5 -- "lxvd2x 46, %6, %2 \n\t" // x6 -- "lxvd2x 47, %7, %2 \n\t" // x7 -- -- "lxvd2x 52, 0, %3 \n\t" // y4 -- "lxvd2x 53, %5, %3 \n\t" // y5 -- "lxvd2x 54, %6, %3 \n\t" // y6 -- "lxvd2x 55, %7, %3 \n\t" // y7 -- -- "xxswapd 60, 44 \n\t" // exchange real and imag part -- "xxswapd 61, 45 \n\t" // exchange real and imag part -- "xxswapd 62, 46 \n\t" // exchange real and imag part -- "xxswapd 63, 47 \n\t" // exchange real and imag part -- -- "addi %2, %2, 64 \n\t" -- "addi %3, %3, 64 \n\t" -- -- "addic. 
%0 , %0 , -8 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2, %10 \n\t" -- "dcbt %3, %10 \n\t" -- -- "xvmaddadp 48, 40, 32 \n\t" // alpha_r * x0_r , alpha_r * x0_i -- "xvmaddadp 49, 41, 32 \n\t" -- "lxvd2x 40, 0, %2 \n\t" // x0 -- "lxvd2x 41, %5, %2 \n\t" // x1 -- "xvmaddadp 50, 42, 32 \n\t" -- "xvmaddadp 51, 43, 32 \n\t" -- "lxvd2x 42, %6, %2 \n\t" // x2 -- "lxvd2x 43, %7, %2 \n\t" // x3 -- -- "xvmaddadp 52, 44, 32 \n\t" -- "addi %2, %2, 64 \n\t" -- "xvmaddadp 53, 45, 32 \n\t" -- "lxvd2x 44, 0, %2 \n\t" // x4 -- "lxvd2x 45, %5, %2 \n\t" // x5 -- "xvmaddadp 54, 46, 32 \n\t" -- "xvmaddadp 55, 47, 32 \n\t" -- "lxvd2x 46, %6, %2 \n\t" // x6 -- "lxvd2x 47, %7, %2 \n\t" // x7 -- -- "xvmaddadp 48, 56, 33 \n\t" // alpha_i * x0_i , alpha_i * x0_r -- "addi %2, %2, 64 \n\t" -- "xvmaddadp 49, 57, 33 \n\t" -- "xvmaddadp 50, 58, 33 \n\t" -- "xvmaddadp 51, 59, 33 \n\t" -- -- "xvmaddadp 52, 60, 33 \n\t" -- "xvmaddadp 53, 61, 33 \n\t" -- "xvmaddadp 54, 62, 33 \n\t" -- "xvmaddadp 55, 63, 33 \n\t" -- -- "stxvd2x 48, 0, %8 \n\t" -- "stxvd2x 49, %5, %8 \n\t" -- "stxvd2x 50, %6, %8 \n\t" -- "stxvd2x 51, %7, %8 \n\t" -- -- "addi %8, %8, 64 \n\t" -- -- "stxvd2x 52, 0, %8 \n\t" -- "stxvd2x 53, %5, %8 \n\t" -- "stxvd2x 54, %6, %8 \n\t" -- "stxvd2x 55, %7, %8 \n\t" -- -- "addi %8, %8, 64 \n\t" -- -- "xxswapd 56, 40 \n\t" // exchange real and imag part -- "xxswapd 57, 41 \n\t" // exchange real and imag part -- "lxvd2x 48, 0, %3 \n\t" // y0 -- "lxvd2x 49, %5, %3 \n\t" // y1 -- "xxswapd 58, 42 \n\t" // exchange real and imag part -- "xxswapd 59, 43 \n\t" // exchange real and imag part -- "lxvd2x 50, %6, %3 \n\t" // y2 -- "lxvd2x 51, %7, %3 \n\t" // y3 -- -- "xxswapd 60, 44 \n\t" // exchange real and imag part -- "addi %3, %3, 64 \n\t" -- "xxswapd 61, 45 \n\t" // exchange real and imag part -- "lxvd2x 52, 0, %3 \n\t" // y4 -- "lxvd2x 53, %5, %3 \n\t" // y5 -- "xxswapd 62, 46 \n\t" // exchange real and imag part -- "xxswapd 63, 47 \n\t" // exchange real and imag part -- "lxvd2x 54, %6, %3 \n\t" // y6 -- "lxvd2x 55, %7, %3 \n\t" // y7 -- -- "addi %3, %3, 64 \n\t" -- -- "addic. 
%0 , %0 , -8 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "xvmaddadp 48, 40, 32 \n\t" // alpha_r * x0_r , alpha_r * x0_i -- "xvmaddadp 49, 41, 32 \n\t" -- "xvmaddadp 50, 42, 32 \n\t" -- "xvmaddadp 51, 43, 32 \n\t" -- -- "xvmaddadp 52, 44, 32 \n\t" -- "xvmaddadp 53, 45, 32 \n\t" -- "xvmaddadp 54, 46, 32 \n\t" -- "xvmaddadp 55, 47, 32 \n\t" -- -- "xvmaddadp 48, 56, 33 \n\t" // alpha_i * x0_i , alpha_i * x0_r -- "xvmaddadp 49, 57, 33 \n\t" -- "xvmaddadp 50, 58, 33 \n\t" -- "xvmaddadp 51, 59, 33 \n\t" -- -- "xvmaddadp 52, 60, 33 \n\t" -- "xvmaddadp 53, 61, 33 \n\t" -- "xvmaddadp 54, 62, 33 \n\t" -- "xvmaddadp 55, 63, 33 \n\t" -- -- -- "stxvd2x 48, 0, %8 \n\t" -- "stxvd2x 49, %5, %8 \n\t" -- "stxvd2x 50, %6, %8 \n\t" -- "stxvd2x 51, %7, %8 \n\t" -- -- "addi %8, %8, 64 \n\t" -- -- "stxvd2x 52, 0, %8 \n\t" -- "stxvd2x 53, %5, %8 \n\t" -- "stxvd2x 54, %6, %8 \n\t" -- "stxvd2x 55, %7, %8 \n\t" -- -- "addi %8, %8, 64 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (n), // 1 -- "r" (x1), // 2 -- "r" (y1), // 3 -- "r" (alpha), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (y2), // 8 -- "r" (mvec), // 9 -- "r" (pre) // 10 -- : "cr0", "%0", "%2" , "%3", "%8", "memory" -- ); -- --} -- -- -+ "mr %16, %3 \n\t" -+ "dcbt 0, %2 \n\t" -+ "dcbt 0, %3 \n\t" -+ -+ -+ "lxvd2x 40, 0, %2 \n\t" // x0 -+ "lxvd2x 41, %22, %2 \n\t" // x1 -+ "lxvd2x 42, %23, %2 \n\t" // x2 -+ "lxvd2x 43, %24, %2 \n\t" // x3 -+ -+ "lxvd2x 48, 0, %3 \n\t" // y0 -+ "lxvd2x 49, %22, %3 \n\t" // y1 -+ "lxvd2x 50, %23, %3 \n\t" // y2 -+ "lxvd2x 51, %24, %3 \n\t" // y3 -+ -+ "xxswapd %x8, 40 \n\t" // exchange real and imag part -+ "xxswapd %x9, 41 \n\t" // exchange real and imag part -+ "xxswapd %x10, 42 \n\t" // exchange real and imag part -+ "xxswapd %x11, 43 \n\t" // exchange real and imag part -+ -+ "addi %2, %2, 64 \n\t" -+ "addi %3, %3, 64 \n\t" -+ -+ "lxvd2x 44, 0, %2 \n\t" // x4 -+ "lxvd2x 45, %22, %2 \n\t" // x5 -+ "lxvd2x 46, %23, %2 \n\t" // x6 -+ "lxvd2x 47, %24, %2 \n\t" // x7 -+ -+ "lxvd2x %x4, 0, %3 \n\t" // y4 -+ "lxvd2x %x5, %22, %3 \n\t" // y5 -+ "lxvd2x %x6, %23, %3 \n\t" // y6 -+ "lxvd2x %x7, %24, %3 \n\t" // y7 -+ -+ "xxswapd %x12, 44 \n\t" // exchange real and imag part -+ "xxswapd %x13, 45 \n\t" // exchange real and imag part -+ "xxswapd %x14, 46 \n\t" // exchange real and imag part -+ "xxswapd %x15, 47 \n\t" // exchange real and imag part -+ -+ "addi %2, %2, 64 \n\t" -+ "addi %3, %3, 64 \n\t" -+ -+ "addic. 
%1, %1, -8 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "xvmaddadp 48, 40, 32 \n\t" // alpha_r * x0_r , alpha_r * x0_i -+ "xvmaddadp 49, 41, 32 \n\t" -+ "lxvd2x 40, 0, %2 \n\t" // x0 -+ "lxvd2x 41, %22, %2 \n\t" // x1 -+ "xvmaddadp 50, 42, 32 \n\t" -+ "xvmaddadp 51, 43, 32 \n\t" -+ "lxvd2x 42, %23, %2 \n\t" // x2 -+ "lxvd2x 43, %24, %2 \n\t" // x3 -+ -+ "xvmaddadp %x4, 44, 32 \n\t" -+ "addi %2, %2, 64 \n\t" -+ "xvmaddadp %x5, 45, 32 \n\t" -+ "lxvd2x 44, 0, %2 \n\t" // x4 -+ "lxvd2x 45, %22, %2 \n\t" // x5 -+ "xvmaddadp %x6, 46, 32 \n\t" -+ "xvmaddadp %x7, 47, 32 \n\t" -+ "lxvd2x 46, %23, %2 \n\t" // x6 -+ "lxvd2x 47, %24, %2 \n\t" // x7 -+ -+ "xvmaddadp 48, %x8, 33 \n\t" // alpha_i * x0_i , alpha_i * x0_r -+ "addi %2, %2, 64 \n\t" -+ "xvmaddadp 49, %x9, 33 \n\t" -+ "xvmaddadp 50, %x10, 33 \n\t" -+ "xvmaddadp 51, %x11, 33 \n\t" -+ -+ "xvmaddadp %x4, %x12, 33 \n\t" -+ "xvmaddadp %x5, %x13, 33 \n\t" -+ "xvmaddadp %x6, %x14, 33 \n\t" -+ "xvmaddadp %x7, %x15, 33 \n\t" -+ -+ "stxvd2x 48, 0, %16 \n\t" -+ "stxvd2x 49, %22, %16 \n\t" -+ "stxvd2x 50, %23, %16 \n\t" -+ "stxvd2x 51, %24, %16 \n\t" -+ -+ "addi %16, %16, 64 \n\t" -+ -+ "stxvd2x %x4, 0, %16 \n\t" -+ "stxvd2x %x5, %22, %16 \n\t" -+ "stxvd2x %x6, %23, %16 \n\t" -+ "stxvd2x %x7, %24, %16 \n\t" -+ -+ "addi %16, %16, 64 \n\t" -+ -+ "xxswapd %x8, 40 \n\t" // exchange real and imag part -+ "xxswapd %x9, 41 \n\t" // exchange real and imag part -+ "lxvd2x 48, 0, %3 \n\t" // y0 -+ "lxvd2x 49, %22, %3 \n\t" // y1 -+ "xxswapd %x10, 42 \n\t" // exchange real and imag part -+ "xxswapd %x11, 43 \n\t" // exchange real and imag part -+ "lxvd2x 50, %23, %3 \n\t" // y2 -+ "lxvd2x 51, %24, %3 \n\t" // y3 -+ -+ "xxswapd %x12, 44 \n\t" // exchange real and imag part -+ "addi %3, %3, 64 \n\t" -+ "xxswapd %x13, 45 \n\t" // exchange real and imag part -+ "lxvd2x %x4, 0, %3 \n\t" // y4 -+ "lxvd2x %x5, %22, %3 \n\t" // y5 -+ "xxswapd %x14, 46 \n\t" // exchange real and imag part -+ "xxswapd %x15, 47 \n\t" // exchange real and imag part -+ "lxvd2x %x6, %23, %3 \n\t" // y6 -+ "lxvd2x %x7, %24, %3 \n\t" // y7 -+ -+ "addi %3, %3, 64 \n\t" -+ -+ "addic. 
%1, %1, -8 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvmaddadp 48, 40, 32 \n\t" // alpha_r * x0_r , alpha_r * x0_i -+ "xvmaddadp 49, 41, 32 \n\t" -+ "xvmaddadp 50, 42, 32 \n\t" -+ "xvmaddadp 51, 43, 32 \n\t" -+ -+ "xvmaddadp %x4, 44, 32 \n\t" -+ "xvmaddadp %x5, 45, 32 \n\t" -+ "xvmaddadp %x6, 46, 32 \n\t" -+ "xvmaddadp %x7, 47, 32 \n\t" -+ -+ "xvmaddadp 48, %x8, 33 \n\t" // alpha_i * x0_i , alpha_i * x0_r -+ "xvmaddadp 49, %x9, 33 \n\t" -+ "xvmaddadp 50, %x10, 33 \n\t" -+ "xvmaddadp 51, %x11, 33 \n\t" -+ -+ "xvmaddadp %x4, %x12, 33 \n\t" -+ "xvmaddadp %x5, %x13, 33 \n\t" -+ "xvmaddadp %x6, %x14, 33 \n\t" -+ "xvmaddadp %x7, %x15, 33 \n\t" -+ -+ "stxvd2x 48, 0, %16 \n\t" -+ "stxvd2x 49, %22, %16 \n\t" -+ "stxvd2x 50, %23, %16 \n\t" -+ "stxvd2x 51, %24, %16 \n\t" -+ -+ "addi %16, %16, 64 \n\t" -+ -+ "stxvd2x %x4, 0, %16 \n\t" -+ "stxvd2x %x5, %22, %16 \n\t" -+ "stxvd2x %x6, %23, %16 \n\t" -+ "stxvd2x %x7, %24, %16 \n" -+ -+ "#n=%1 x=%17=%2 y=%0=%3 alpha=(%19,%20) mvecp=%18=%16 o16=%22 o32=%23 o48=%24 ytmp=%16\n" -+ "#t0=%x4 t1=%x5 t2=%x6 t3=%x7 t4=%x8 t5=%x9 t6=%x10 t7=%x11 t8=%x12 t9=%x13 t10=%x14 t11=%x15" -+ : -+ "+m" (*y), -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "+b" (y), // 3 -+ "=wa" (t0), // 4 -+ "=wa" (t1), // 5 -+ "=wa" (t2), // 6 -+ "=wa" (t3), // 7 -+ "=wa" (t4), // 8 -+ "=wa" (t5), // 9 -+ "=wa" (t6), // 10 -+ "=wa" (t7), // 11 -+ "=wa" (t8), // 12 -+ "=wa" (t9), // 13 -+ "=wa" (t10), // 14 -+ "=wa" (t11), // 15 -+ "=b" (ytmp) // 16 -+ : -+ "m" (*x), -+ "m" (*mvecp), -+ "d" (alpha_r), // 19 -+ "d" (alpha_i), // 20 -+ "16" (mvecp), // 21 -+ "b" (16), // 22 -+ "b" (32), // 23 -+ "b" (48) // 24 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51" -+ ); -+} -diff --git a/kernel/power/zcopy_microk_power8.c b/kernel/power/zcopy_microk_power8.c -index 73abe08..5ca34b6 100644 ---- a/kernel/power/zcopy_microk_power8.c -+++ b/kernel/power/zcopy_microk_power8.c -@@ -35,140 +35,121 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #define HAVE_KERNEL_16 1 - --static void zcopy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y) __attribute__ ((noinline)); -- --static void zcopy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y) -+static void zcopy_kernel_16 (long n, FLOAT *x, FLOAT *y) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *y1=y; -- BLASLONG pre = 384; -- BLASLONG alpha=0; -- -- __asm__ __volatile__ -- ( -- -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "lxvd2x 50, 0, %2 \n\t" -- "lxvd2x 51, %5, %2 \n\t" -- "lxvd2x 52, %6, %2 \n\t" -- "lxvd2x 53, %7, %2 \n\t" -- "lxvd2x 54, %8, %2 \n\t" -- "lxvd2x 55, %9, %2 \n\t" -- "lxvd2x 56, %10, %2 \n\t" -- "lxvd2x 57, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "addic. 
%0 , %0 , -16 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "stxvd2x 40, 0, %1 \n\t" -- "stxvd2x 41, %5, %1 \n\t" -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "stxvd2x 42, %6, %1 \n\t" -- "stxvd2x 43, %7, %1 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "stxvd2x 44, %8, %1 \n\t" -- "stxvd2x 45, %9, %1 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "stxvd2x 46, %10, %1 \n\t" -- "stxvd2x 47, %11, %1 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- -- "addi %1, %1, 128 \n\t" -- "addi %2, %2, 128 \n\t" -- -- "stxvd2x 50, 0, %1 \n\t" -- "stxvd2x 51, %5, %1 \n\t" -- "lxvd2x 50, 0, %2 \n\t" -- "lxvd2x 51, %5, %2 \n\t" -- "stxvd2x 52, %6, %1 \n\t" -- "stxvd2x 53, %7, %1 \n\t" -- "lxvd2x 52, %6, %2 \n\t" -- "lxvd2x 53, %7, %2 \n\t" -- "stxvd2x 54, %8, %1 \n\t" -- "stxvd2x 55, %9, %1 \n\t" -- "lxvd2x 54, %8, %2 \n\t" -- "lxvd2x 55, %9, %2 \n\t" -- "stxvd2x 56, %10, %1 \n\t" -- "stxvd2x 57, %11, %1 \n\t" -- "lxvd2x 56, %10, %2 \n\t" -- "lxvd2x 57, %11, %2 \n\t" -- -- "addi %1, %1, 128 \n\t" -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -16 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "stxvd2x 40, 0, %1 \n\t" -- "stxvd2x 41, %5, %1 \n\t" -- "stxvd2x 42, %6, %1 \n\t" -- "stxvd2x 43, %7, %1 \n\t" -- "stxvd2x 44, %8, %1 \n\t" -- "stxvd2x 45, %9, %1 \n\t" -- "stxvd2x 46, %10, %1 \n\t" -- "stxvd2x 47, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "stxvd2x 50, 0, %1 \n\t" -- "stxvd2x 51, %5, %1 \n\t" -- "stxvd2x 52, %6, %1 \n\t" -- "stxvd2x 53, %7, %1 \n\t" -- "stxvd2x 54, %8, %1 \n\t" -- "stxvd2x 55, %9, %1 \n\t" -- "stxvd2x 56, %10, %1 \n\t" -- "stxvd2x 57, %11, %1 \n\t" -- -- -- : -- : -- "r" (i), // 0 -- "r" (y1), // 1 -- "r" (x1), // 2 -- "r" (alpha), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "memory" -- ); -- --} -- -- -+ __asm__ -+ ( -+ "lxvd2x 32, 0, %2 \n\t" -+ "lxvd2x 33, %5, %2 \n\t" -+ "lxvd2x 34, %6, %2 \n\t" -+ "lxvd2x 35, %7, %2 \n\t" -+ "lxvd2x 36, %8, %2 \n\t" -+ "lxvd2x 37, %9, %2 \n\t" -+ "lxvd2x 38, %10, %2 \n\t" -+ "lxvd2x 39, %11, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "lxvd2x 40, 0, %2 \n\t" -+ "lxvd2x 41, %5, %2 \n\t" -+ "lxvd2x 42, %6, %2 \n\t" -+ "lxvd2x 43, %7, %2 \n\t" -+ "lxvd2x 44, %8, %2 \n\t" -+ "lxvd2x 45, %9, %2 \n\t" -+ "lxvd2x 46, %10, %2 \n\t" -+ "lxvd2x 47, %11, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. 
%1, %1, -16 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "stxvd2x 32, 0, %3 \n\t" -+ "stxvd2x 33, %5, %3 \n\t" -+ "lxvd2x 32, 0, %2 \n\t" -+ "lxvd2x 33, %5, %2 \n\t" -+ "stxvd2x 34, %6, %3 \n\t" -+ "stxvd2x 35, %7, %3 \n\t" -+ "lxvd2x 34, %6, %2 \n\t" -+ "lxvd2x 35, %7, %2 \n\t" -+ "stxvd2x 36, %8, %3 \n\t" -+ "stxvd2x 37, %9, %3 \n\t" -+ "lxvd2x 36, %8, %2 \n\t" -+ "lxvd2x 37, %9, %2 \n\t" -+ "stxvd2x 38, %10, %3 \n\t" -+ "stxvd2x 39, %11, %3 \n\t" -+ "lxvd2x 38, %10, %2 \n\t" -+ "lxvd2x 39, %11, %2 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ "addi %2, %2, 128 \n\t" -+ -+ "stxvd2x 40, 0, %3 \n\t" -+ "stxvd2x 41, %5, %3 \n\t" -+ "lxvd2x 40, 0, %2 \n\t" -+ "lxvd2x 41, %5, %2 \n\t" -+ "stxvd2x 42, %6, %3 \n\t" -+ "stxvd2x 43, %7, %3 \n\t" -+ "lxvd2x 42, %6, %2 \n\t" -+ "lxvd2x 43, %7, %2 \n\t" -+ "stxvd2x 44, %8, %3 \n\t" -+ "stxvd2x 45, %9, %3 \n\t" -+ "lxvd2x 44, %8, %2 \n\t" -+ "lxvd2x 45, %9, %2 \n\t" -+ "stxvd2x 46, %10, %3 \n\t" -+ "stxvd2x 47, %11, %3 \n\t" -+ "lxvd2x 46, %10, %2 \n\t" -+ "lxvd2x 47, %11, %2 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. %1, %1, -16 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "stxvd2x 32, 0, %3 \n\t" -+ "stxvd2x 33, %5, %3 \n\t" -+ "stxvd2x 34, %6, %3 \n\t" -+ "stxvd2x 35, %7, %3 \n\t" -+ "stxvd2x 36, %8, %3 \n\t" -+ "stxvd2x 37, %9, %3 \n\t" -+ "stxvd2x 38, %10, %3 \n\t" -+ "stxvd2x 39, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "stxvd2x 40, 0, %3 \n\t" -+ "stxvd2x 41, %5, %3 \n\t" -+ "stxvd2x 42, %6, %3 \n\t" -+ "stxvd2x 43, %7, %3 \n\t" -+ "stxvd2x 44, %8, %3 \n\t" -+ "stxvd2x 45, %9, %3 \n\t" -+ "stxvd2x 46, %10, %3 \n\t" -+ "stxvd2x 47, %11, %3 \n" -+ -+ "#n=%1 x=%4=%2 y=%0=%3 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" -+ : -+ "=m" (*y), -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "+b" (y) // 3 -+ : -+ "m" (*x), -+ "b" (16), // 5 -+ "b" (32), // 6 -+ "b" (48), // 7 -+ "b" (64), // 8 -+ "b" (80), // 9 -+ "b" (96), // 10 -+ "b" (112) // 11 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47" -+ ); -+} -diff --git a/kernel/power/zdot.c b/kernel/power/zdot.c -index bc1a95e..b83f832 100644 ---- a/kernel/power/zdot.c -+++ b/kernel/power/zdot.c -@@ -43,8 +43,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #ifndef HAVE_KERNEL_8 - --static void zdot_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *d) __attribute__ ((noinline)); -- - static void zdot_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *d) - { - BLASLONG register i = 0; -diff --git a/kernel/power/zdot_microk_power8.c b/kernel/power/zdot_microk_power8.c -index 296d3d4..71078b6 100644 ---- a/kernel/power/zdot_microk_power8.c -+++ b/kernel/power/zdot_microk_power8.c -@@ -34,186 +34,174 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- **************************************************************************************/ - - #define HAVE_KERNEL_8 1 --static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y , FLOAT *dot) __attribute__ ((noinline)); - --static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) -+static void zdot_kernel_8 (long n, double *x, double *y, double *dot) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- FLOAT *x1=x; -- FLOAT *y1=y; -- BLASLONG pre = 384; -- -- __asm__ __volatile__ -- ( -- "xxlxor 32,32,32 \n\t" -- "xxlxor 33,33,33 \n\t" -- "xxlxor 34,34,34 \n\t" -- "xxlxor 35,35,35 \n\t" -- "xxlxor 36,36,36 \n\t" -- "xxlxor 37,37,37 \n\t" -- "xxlxor 38,38,38 \n\t" -- "xxlxor 39,39,39 \n\t" -- -- "dcbt %2, %8 \n\t" -- "dcbt %3, %8 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" // x0_r, x0_i -- "lxvd2x 48, 0, %3 \n\t" // y0_r, y0_i -- "lxvd2x 41, %5, %2 \n\t" // x1_r, x1_i -- "lxvd2x 49, %5, %3 \n\t" // y1_r, y1_i -- "lxvd2x 42, %6, %2 \n\t" // x2_r, x2_i -- "lxvd2x 50, %6, %3 \n\t" // y2_r, y2_i -- "lxvd2x 43, %7, %2 \n\t" // x3_r, x3_i -- "lxvd2x 51, %7, %3 \n\t" // y3_r, y3_i -- -- "xxswapd 52,48 \n\t" // y0_i, y0_r -- "xxswapd 53,49 \n\t" // y1_i, y1_r -- "xxswapd 54,50 \n\t" // y2_i, y2_r -- "xxswapd 55,51 \n\t" // y3_i, y3_r -- -- "addi %2, %2, 64 \n\t" -- "addi %3, %3, 64 \n\t" -- -- -- "lxvd2x 44, 0, %2 \n\t" // x0_r, x0_i -- "lxvd2x 56, 0, %3 \n\t" // y0_r, y0_i -- "lxvd2x 45, %5, %2 \n\t" // x1_r, x1_i -- "lxvd2x 57, %5, %3 \n\t" // y1_r, y1_i -- "lxvd2x 46, %6, %2 \n\t" // x2_r, x2_i -- "lxvd2x 58, %6, %3 \n\t" // y2_r, y2_i -- "lxvd2x 47, %7, %2 \n\t" // x3_r, x3_i -- "lxvd2x 59, %7, %3 \n\t" // y3_r, y3_i -- -- "xxswapd 60,56 \n\t" // y0_i, y0_r -- "xxswapd 61,57 \n\t" // y1_i, y1_r -- "xxswapd 62,58 \n\t" // y2_i, y2_r -- "xxswapd 63,59 \n\t" // y3_i, y3_r -- -- "addi %2, %2, 64 \n\t" -- "addi %3, %3, 64 \n\t" -- -- "addic. 
%0 , %0 , -8 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2, %8 \n\t" -- "dcbt %3, %8 \n\t" -- -- "xvmaddadp 32, 40, 48 \n\t" // x0_r * y0_r , x0_i * y0_i -- "lxvd2x 48, 0, %3 \n\t" // y0_r, y0_i -- "xvmaddadp 34, 41, 49 \n\t" // x1_r * y1_r , x1_i * y1_i -- "lxvd2x 49, %5, %3 \n\t" // y1_r, y1_i -- -- "xvmaddadp 36, 42, 50 \n\t" // x2_r * y2_r , x2_i * y2_i -- "lxvd2x 50, %6, %3 \n\t" // y2_r, y2_i -- "xvmaddadp 38, 43, 51 \n\t" // x3_r * y3_r , x3_i * y3_i -- "lxvd2x 51, %7, %3 \n\t" // y3_r, y3_i -- -- "xvmaddadp 33, 40, 52 \n\t" // x0_r * y0_i , x0_i * y0_r -- "lxvd2x 40, 0, %2 \n\t" // x0_r, x0_i -- "xvmaddadp 35, 41, 53 \n\t" // x1_r * y1_i , x1_i * y1_r -- "lxvd2x 41, %5, %2 \n\t" // x1_r, x1_i -- -- "xvmaddadp 37, 42, 54 \n\t" // x2_r * y2_i , x2_i * y2_r -- "lxvd2x 42, %6, %2 \n\t" // x2_r, x2_i -- "xvmaddadp 39, 43, 55 \n\t" // x3_r * y3_i , x3_i * y3_r -- "lxvd2x 43, %7, %2 \n\t" // x3_r, x3_i -- -- "xxswapd 52,48 \n\t" // y0_i, y0_r -- "xxswapd 53,49 \n\t" // y1_i, y1_r -- -- "addi %2, %2, 64 \n\t" -- "addi %3, %3, 64 \n\t" -- -- "xxswapd 54,50 \n\t" // y2_i, y2_r -- "xxswapd 55,51 \n\t" // y3_i, y3_r -- -- "xvmaddadp 32, 44, 56 \n\t" // x0_r * y0_r , x0_i * y0_i -- "lxvd2x 56, 0, %3 \n\t" // y0_r, y0_i -- "xvmaddadp 34, 45, 57 \n\t" // x1_r * y1_r , x1_i * y1_i -- "lxvd2x 57, %5, %3 \n\t" // y1_r, y1_i -- "xvmaddadp 36, 46, 58 \n\t" // x2_r * y2_r , x2_i * y2_i -- "lxvd2x 58, %6, %3 \n\t" // y2_r, y2_i -- "xvmaddadp 38, 47, 59 \n\t" // x3_r * y3_r , x3_i * y3_i -- "lxvd2x 59, %7, %3 \n\t" // y3_r, y3_i -- -- "xvmaddadp 33, 44, 60 \n\t" // x0_r * y0_i , x0_i * y0_r -- "lxvd2x 44, 0, %2 \n\t" // x0_r, x0_i -- "xvmaddadp 35, 45, 61 \n\t" // x1_r * y1_i , x1_i * y1_r -- "lxvd2x 45, %5, %2 \n\t" // x1_r, x1_i -- "xvmaddadp 37, 46, 62 \n\t" // x2_r * y2_i , x2_i * y2_r -- "lxvd2x 46, %6, %2 \n\t" // x2_r, x2_i -- "xvmaddadp 39, 47, 63 \n\t" // x3_r * y3_i , x3_i * y3_r -- "lxvd2x 47, %7, %2 \n\t" // x3_r, x3_i -- -- "xxswapd 60,56 \n\t" // y0_i, y0_r -- "xxswapd 61,57 \n\t" // y1_i, y1_r -- -- "addi %2, %2, 64 \n\t" -- "addi %3, %3, 64 \n\t" -- -- "xxswapd 62,58 \n\t" // y2_i, y2_r -- "xxswapd 63,59 \n\t" // y3_i, y3_r -- -- "addic. 
%0 , %0 , -8 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "xvmaddadp 32, 40, 48 \n\t" // x0_r * y0_r , x0_i * y0_i -- "xvmaddadp 34, 41, 49 \n\t" // x1_r * y1_r , x1_i * y1_i -- "xvmaddadp 36, 42, 50 \n\t" // x2_r * y2_r , x2_i * y2_i -- "xvmaddadp 38, 43, 51 \n\t" // x3_r * y3_r , x3_i * y3_i -- -- "xvmaddadp 33, 40, 52 \n\t" // x0_r * y0_i , x0_i * y0_r -- "xvmaddadp 35, 41, 53 \n\t" // x1_r * y1_i , x1_i * y1_r -- "xvmaddadp 37, 42, 54 \n\t" // x2_r * y2_i , x2_i * y2_r -- "xvmaddadp 39, 43, 55 \n\t" // x3_r * y3_i , x3_i * y3_r -- -- "xvmaddadp 32, 44, 56 \n\t" // x0_r * y0_r , x0_i * y0_i -- "xvmaddadp 34, 45, 57 \n\t" // x1_r * y1_r , x1_i * y1_i -- "xvmaddadp 36, 46, 58 \n\t" // x2_r * y2_r , x2_i * y2_i -- "xvmaddadp 38, 47, 59 \n\t" // x3_r * y3_r , x3_i * y3_i -- -- "xvmaddadp 33, 44, 60 \n\t" // x0_r * y0_i , x0_i * y0_r -- "xvmaddadp 35, 45, 61 \n\t" // x1_r * y1_i , x1_i * y1_r -- "xvmaddadp 37, 46, 62 \n\t" // x2_r * y2_i , x2_i * y2_r -- "xvmaddadp 39, 47, 63 \n\t" // x3_r * y3_i , x3_i * y3_r -- -- -- "xvadddp 32, 32, 34 \n\t" -- "xvadddp 36, 36, 38 \n\t" -- -- "xvadddp 33, 33, 35 \n\t" -- "xvadddp 37, 37, 39 \n\t" -- -- "xvadddp 32, 32, 36 \n\t" -- "xvadddp 33, 33, 37 \n\t" -- -- "stxvd2x 32, 0, %4 \n\t" -- "stxvd2x 33, %5, %4 \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (n), // 1 -- "r" (x1), // 2 -- "r" (y1), // 3 -- "r" (dot), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (pre) // 8 -- : "cr0", "%0", "%2" , "%3", "memory" -- ); -- --} -- -- -+ __asm__ -+ ( -+ "dcbt 0, %2 \n\t" -+ "dcbt 0, %3 \n\t" -+ -+ "xxlxor 32, 32, 32 \n\t" -+ "xxlxor 33, 33, 33 \n\t" -+ "xxlxor 34, 34, 34 \n\t" -+ "xxlxor 35, 35, 35 \n\t" -+ "xxlxor 36, 36, 36 \n\t" -+ "xxlxor 37, 37, 37 \n\t" -+ "xxlxor 38, 38, 38 \n\t" -+ "xxlxor 39, 39, 39 \n\t" -+ -+ "lxvd2x 40, 0, %2 \n\t" // x0_r, x0_i -+ "lxvd2x 48, 0, %3 \n\t" // y0_r, y0_i -+ "lxvd2x 41, %7, %2 \n\t" // x1_r, x1_i -+ "lxvd2x 49, %7, %3 \n\t" // y1_r, y1_i -+ "lxvd2x 42, %8, %2 \n\t" // x2_r, x2_i -+ "lxvd2x 50, %8, %3 \n\t" // y2_r, y2_i -+ "lxvd2x 43, %9, %2 \n\t" // x3_r, x3_i -+ "lxvd2x 51, %9, %3 \n\t" // y3_r, y3_i -+ -+ "xxswapd 0, 48 \n\t" // y0_i, y0_r -+ "xxswapd 1, 49 \n\t" // y1_i, y1_r -+ "xxswapd 2, 50 \n\t" // y2_i, y2_r -+ "xxswapd 3, 51 \n\t" // y3_i, y3_r -+ -+ "addi %2, %2, 64 \n\t" -+ "addi %3, %3, 64 \n\t" -+ -+ "lxvd2x 44, 0, %2 \n\t" // x0_r, x0_i -+ "lxvd2x 4, 0, %3 \n\t" // y0_r, y0_i -+ "lxvd2x 45, %7, %2 \n\t" // x1_r, x1_i -+ "lxvd2x 5, %7, %3 \n\t" // y1_r, y1_i -+ "lxvd2x 46, %8, %2 \n\t" // x2_r, x2_i -+ "lxvd2x 6, %8, %3 \n\t" // y2_r, y2_i -+ "lxvd2x 47, %9, %2 \n\t" // x3_r, x3_i -+ "lxvd2x 7, %9, %3 \n\t" // y3_r, y3_i -+ -+ "xxswapd 8, 4 \n\t" // y0_i, y0_r -+ "xxswapd 9, 5 \n\t" // y1_i, y1_r -+ "xxswapd 10, 6 \n\t" // y2_i, y2_r -+ "xxswapd 11, 7 \n\t" // y3_i, y3_r -+ -+ "addi %2, %2, 64 \n\t" -+ "addi %3, %3, 64 \n\t" -+ -+ "addic. 
%1, %1, -8 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "xvmaddadp 32, 40, 48 \n\t" // x0_r * y0_r , x0_i * y0_i -+ "lxvd2x 48, 0, %3 \n\t" // y0_r, y0_i -+ "xvmaddadp 34, 41, 49 \n\t" // x1_r * y1_r , x1_i * y1_i -+ "lxvd2x 49, %7, %3 \n\t" // y1_r, y1_i -+ -+ "xvmaddadp 36, 42, 50 \n\t" // x2_r * y2_r , x2_i * y2_i -+ "lxvd2x 50, %8, %3 \n\t" // y2_r, y2_i -+ "xvmaddadp 38, 43, 51 \n\t" // x3_r * y3_r , x3_i * y3_i -+ "lxvd2x 51, %9, %3 \n\t" // y3_r, y3_i -+ -+ "xvmaddadp 33, 40, 0 \n\t" // x0_r * y0_i , x0_i * y0_r -+ "lxvd2x 40, 0, %2 \n\t" // x0_r, x0_i -+ "xvmaddadp 35, 41, 1 \n\t" // x1_r * y1_i , x1_i * y1_r -+ "lxvd2x 41, %7, %2 \n\t" // x1_r, x1_i -+ -+ "xvmaddadp 37, 42, 2 \n\t" // x2_r * y2_i , x2_i * y2_r -+ "lxvd2x 42, %8, %2 \n\t" // x2_r, x2_i -+ "xvmaddadp 39, 43, 3 \n\t" // x3_r * y3_i , x3_i * y3_r -+ "lxvd2x 43, %9, %2 \n\t" // x3_r, x3_i -+ -+ "xxswapd 0,48 \n\t" // y0_i, y0_r -+ "xxswapd 1,49 \n\t" // y1_i, y1_r -+ -+ "addi %2, %2, 64 \n\t" -+ "addi %3, %3, 64 \n\t" -+ -+ "xxswapd 2,50 \n\t" // y2_i, y2_r -+ "xxswapd 3,51 \n\t" // y3_i, y3_r -+ -+ "xvmaddadp 32, 44, 4 \n\t" // x0_r * y0_r , x0_i * y0_i -+ "lxvd2x 4, 0, %3 \n\t" // y0_r, y0_i -+ "xvmaddadp 34, 45, 5 \n\t" // x1_r * y1_r , x1_i * y1_i -+ "lxvd2x 5, %7, %3 \n\t" // y1_r, y1_i -+ "xvmaddadp 36, 46, 6 \n\t" // x2_r * y2_r , x2_i * y2_i -+ "lxvd2x 6, %8, %3 \n\t" // y2_r, y2_i -+ "xvmaddadp 38, 47, 7 \n\t" // x3_r * y3_r , x3_i * y3_i -+ "lxvd2x 7, %9, %3 \n\t" // y3_r, y3_i -+ -+ "xvmaddadp 33, 44, 8 \n\t" // x0_r * y0_i , x0_i * y0_r -+ "lxvd2x 44, 0, %2 \n\t" // x0_r, x0_i -+ "xvmaddadp 35, 45, 9 \n\t" // x1_r * y1_i , x1_i * y1_r -+ "lxvd2x 45, %7, %2 \n\t" // x1_r, x1_i -+ "xvmaddadp 37, 46, 10 \n\t" // x2_r * y2_i , x2_i * y2_r -+ "lxvd2x 46, %8, %2 \n\t" // x2_r, x2_i -+ "xvmaddadp 39, 47, 11 \n\t" // x3_r * y3_i , x3_i * y3_r -+ "lxvd2x 47, %9, %2 \n\t" // x3_r, x3_i -+ -+ "xxswapd 8,4 \n\t" // y0_i, y0_r -+ "xxswapd 9,5 \n\t" // y1_i, y1_r -+ -+ "addi %2, %2, 64 \n\t" -+ "addi %3, %3, 64 \n\t" -+ -+ "xxswapd 10,6 \n\t" // y2_i, y2_r -+ "xxswapd 11,7 \n\t" // y3_i, y3_r -+ -+ "addic. 
%1, %1, -8 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvmaddadp 32, 40, 48 \n\t" // x0_r * y0_r , x0_i * y0_i -+ "xvmaddadp 34, 41, 49 \n\t" // x1_r * y1_r , x1_i * y1_i -+ "xvmaddadp 36, 42, 50 \n\t" // x2_r * y2_r , x2_i * y2_i -+ "xvmaddadp 38, 43, 51 \n\t" // x3_r * y3_r , x3_i * y3_i -+ -+ "xvmaddadp 33, 40, 0 \n\t" // x0_r * y0_i , x0_i * y0_r -+ "xvmaddadp 35, 41, 1 \n\t" // x1_r * y1_i , x1_i * y1_r -+ "xvmaddadp 37, 42, 2 \n\t" // x2_r * y2_i , x2_i * y2_r -+ "xvmaddadp 39, 43, 3 \n\t" // x3_r * y3_i , x3_i * y3_r -+ -+ "xvmaddadp 32, 44, 4 \n\t" // x0_r * y0_r , x0_i * y0_i -+ "xvmaddadp 34, 45, 5 \n\t" // x1_r * y1_r , x1_i * y1_i -+ "xvmaddadp 36, 46, 6 \n\t" // x2_r * y2_r , x2_i * y2_i -+ "xvmaddadp 38, 47, 7 \n\t" // x3_r * y3_r , x3_i * y3_i -+ -+ "xvmaddadp 33, 44, 8 \n\t" // x0_r * y0_i , x0_i * y0_r -+ "xvmaddadp 35, 45, 9 \n\t" // x1_r * y1_i , x1_i * y1_r -+ "xvmaddadp 37, 46, 10 \n\t" // x2_r * y2_i , x2_i * y2_r -+ "xvmaddadp 39, 47, 11 \n\t" // x3_r * y3_i , x3_i * y3_r -+ -+ "xvadddp 32, 32, 34 \n\t" -+ "xvadddp 36, 36, 38 \n\t" -+ -+ "xvadddp 33, 33, 35 \n\t" -+ "xvadddp 37, 37, 39 \n\t" -+ -+ "xvadddp 32, 32, 36 \n\t" -+ "xvadddp 33, 33, 37 \n\t" -+ -+ "stxvd2x 32, 0, %6 \n\t" -+ "stxvd2x 33, %7, %6 \n" -+ -+ "#n=%1 x=%4=%2 y=%5=%3 dot=%0=%6 o16=%7 o32=%8 o48=%9" -+ : -+ "=m" (*dot), -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "+b" (y) // 3 -+ : -+ "m" (*x), -+ "m" (*y), -+ "b" (dot), // 6 -+ "b" (16), // 7 -+ "b" (32), // 8 -+ "b" (48) // 9 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51","vs0","vs1","vs2","vs3", -+ "vs4","vs5","vs6","vs7","vs8","vs9","vs10","vs11" -+ ); -+} -diff --git a/kernel/power/zscal.c b/kernel/power/zscal.c -index 410fc98..14d677f 100644 ---- a/kernel/power/zscal.c -+++ b/kernel/power/zscal.c -@@ -47,15 +47,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #ifndef HAVE_KERNEL_8 - --static void zscal_kernel_8(BLASLONG n, FLOAT *x, FLOAT *alpha) -+static void zscal_kernel_8(BLASLONG n, FLOAT *x, FLOAT da_r, FLOAT da_i) - { - - BLASLONG i=0; - FLOAT *x1=x; -- FLOAT alpha_r1=alpha[0]; -- FLOAT alpha_r2=alpha[1]; -- FLOAT alpha_i1=alpha[2]; -- FLOAT alpha_i2=alpha[3]; -+ FLOAT alpha_r1=da_r; -+ FLOAT alpha_r2=da_r; -+ FLOAT alpha_i1=-da_i; -+ FLOAT alpha_i2=da_i; - FLOAT temp00, temp01, temp10, temp11, temp20, temp21, temp30, temp31; - FLOAT x0_r, x0_i, x1_r, x1_i, x2_r, x2_i, x3_r, x3_i; - -@@ -116,7 +116,6 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r,FLOAT da_i, F - BLASLONG inc_x2; - BLASLONG ip = 0; - FLOAT temp; -- FLOAT alpha[4] __attribute__ ((aligned (16)));; - BLASLONG n1; - - if ( n <= 0 ) -@@ -147,11 +146,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r,FLOAT da_i, F - n1 = n & -8; - if ( n1 > 0 ) - { -- alpha[0] = da_r; -- alpha[1] = da_r; -- alpha[2] = -da_i; -- alpha[3] = da_i; -- zscal_kernel_8(n1, x, alpha); -+ zscal_kernel_8(n1, x, da_r, da_i); - i=n1; - ip = n1 * 2; - -diff --git a/kernel/power/zscal_microk_power8.c b/kernel/power/zscal_microk_power8.c -index 5e09d8d..aba9029 100644 ---- a/kernel/power/zscal_microk_power8.c -+++ b/kernel/power/zscal_microk_power8.c -@@ -38,187 +38,202 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - #define HAVE_KERNEL_8 1 - --static void zscal_kernel_8( BLASLONG n, FLOAT *x, FLOAT *alpha) __attribute__ ((noinline)); -- --static void zscal_kernel_8( BLASLONG n, FLOAT *x, FLOAT *alpha) -+static void zscal_kernel_8 (long n, double *x, double alpha_r, double alpha_i) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *x2=x+1; -- BLASLONG pre = 384; -- -- __asm__ __volatile__ -- ( -- -- "lxvd2x 32, 0, %3 \n\t" // alpha_r , alpha_r -- "lxvd2x 33, %5, %3 \n\t" // -alpha_i , alpha_i -- "addi %1, %1, -8 \n\t" -- -- "dcbt %2, %4 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" // x0_r, x0_i -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "addic. %0 , %0 , -8 \n\t" -- "ble 2f \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "dcbt %2, %4 \n\t" -- -- "xvmuldp 48, 40, 32 \n\t" // x0_r * alpha_r, x0_i * alpha_r -- "xvmuldp 49, 41, 32 \n\t" -- "xvmuldp 50, 42, 32 \n\t" -- "xvmuldp 51, 43, 32 \n\t" -- "xvmuldp 52, 44, 32 \n\t" -- "xvmuldp 53, 45, 32 \n\t" -- "xvmuldp 54, 46, 32 \n\t" -- "xvmuldp 55, 47, 32 \n\t" -- -- "xxswapd 56, 40 \n\t" -- "xxswapd 57, 41 \n\t" -- "xxswapd 58, 42 \n\t" -- "xxswapd 59, 43 \n\t" -- "xxswapd 60, 44 \n\t" -- "xxswapd 61, 45 \n\t" -- "xxswapd 62, 46 \n\t" -- "xxswapd 63, 47 \n\t" -- -- "xvmuldp 56, 56, 33 \n\t" // x0_i * -alpha_i, x0_r * alpha_i -- "xvmuldp 57, 57, 33 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" // x0_r, x0_i -- "lxvd2x 41, %5, %2 \n\t" -- -- "xvmuldp 58, 58, 33 \n\t" -- "xvmuldp 59, 59, 33 \n\t" -- -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- -- "xvmuldp 60, 60, 33 \n\t" -- "xvmuldp 61, 61, 33 \n\t" -- -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- -- "xvmuldp 62, 62, 33 \n\t" -- "xvmuldp 63, 63, 33 \n\t" -- -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "xvadddp 48, 48 , 56 \n\t" -- "xvadddp 49, 49 , 57 \n\t" -- "xvadddp 50, 50 , 58 \n\t" -- "xvadddp 51, 51 , 59 \n\t" -- -- "stxvd2x 48, 0, %1 \n\t" -- "stxvd2x 49, %5, %1 \n\t" -- -- "xvadddp 52, 52 , 60 \n\t" -- "xvadddp 53, 53 , 61 \n\t" -- -- "stxvd2x 50, %6, %1 \n\t" -- "stxvd2x 51, %7, %1 \n\t" -- -- "xvadddp 54, 54 , 62 \n\t" -- "xvadddp 55, 55 , 63 \n\t" -- -- "stxvd2x 52, %8, %1 \n\t" -- "stxvd2x 53, %9, %1 \n\t" -- "stxvd2x 54, %10, %1 \n\t" -- "stxvd2x 55, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- "addi %2, %2, 128 \n\t" -- -- "addic. 
%0 , %0 , -8 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- "xvmuldp 48, 40, 32 \n\t" // x0_r * alpha_r, x0_i * alpha_r -- "xvmuldp 49, 41, 32 \n\t" -- "xvmuldp 50, 42, 32 \n\t" -- "xvmuldp 51, 43, 32 \n\t" -- "xvmuldp 52, 44, 32 \n\t" -- "xvmuldp 53, 45, 32 \n\t" -- "xvmuldp 54, 46, 32 \n\t" -- "xvmuldp 55, 47, 32 \n\t" -- -- "xxswapd 56, 40 \n\t" -- "xxswapd 57, 41 \n\t" -- "xxswapd 58, 42 \n\t" -- "xxswapd 59, 43 \n\t" -- "xxswapd 60, 44 \n\t" -- "xxswapd 61, 45 \n\t" -- "xxswapd 62, 46 \n\t" -- "xxswapd 63, 47 \n\t" -- -- "xvmuldp 56, 56, 33 \n\t" // x0_i * -alpha_i, x0_r * alpha_i -- "xvmuldp 57, 57, 33 \n\t" -- "xvmuldp 58, 58, 33 \n\t" -- "xvmuldp 59, 59, 33 \n\t" -- "xvmuldp 60, 60, 33 \n\t" -- "xvmuldp 61, 61, 33 \n\t" -- "xvmuldp 62, 62, 33 \n\t" -- "xvmuldp 63, 63, 33 \n\t" -- -- "xvadddp 48, 48 , 56 \n\t" -- "xvadddp 49, 49 , 57 \n\t" -- "xvadddp 50, 50 , 58 \n\t" -- "xvadddp 51, 51 , 59 \n\t" -- "xvadddp 52, 52 , 60 \n\t" -- "xvadddp 53, 53 , 61 \n\t" -- "xvadddp 54, 54 , 62 \n\t" -- "xvadddp 55, 55 , 63 \n\t" -- -- "stxvd2x 48, 0, %1 \n\t" -- "stxvd2x 49, %5, %1 \n\t" -- "stxvd2x 50, %6, %1 \n\t" -- "stxvd2x 51, %7, %1 \n\t" -- "stxvd2x 52, %8, %1 \n\t" -- "stxvd2x 53, %9, %1 \n\t" -- "stxvd2x 54, %10, %1 \n\t" -- "stxvd2x 55, %11, %1 \n\t" -- -- -- : -- : -- "r" (i), // 0 -- "r" (x2), // 1 -- "r" (x1), // 2 -- "r" (alpha), // 3 -- "r" (pre), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "memory" -- ); -- --} -- -- -+ __vector double t0; -+ __vector double t1; -+ __vector double t2; -+ __vector double t3; -+ __vector double t4; -+ __vector double t5; -+ __vector double t6; -+ __vector double t7; -+ __vector double t8; -+ __vector double t9; -+ __vector double t10; -+ __vector double t11; -+ -+ __asm__ -+ ( -+ "dcbt 0, %2 \n\t" -+ -+ "xsnegdp 33, %x16 \n\t" // -alpha_i -+ "xxspltd 32, %x15, 0 \n\t" // alpha_r , alpha_r -+ "xxmrghd 33, 33, %x16 \n\t" // -alpha_i , alpha_i -+ -+ "lxvd2x 40, 0, %2 \n\t" // x0_r, x0_i -+ "lxvd2x 41, %17, %2 \n\t" -+ "lxvd2x 42, %18, %2 \n\t" -+ "lxvd2x 43, %19, %2 \n\t" -+ "lxvd2x 44, %20, %2 \n\t" -+ "lxvd2x 45, %21, %2 \n\t" -+ "lxvd2x 46, %22, %2 \n\t" -+ "lxvd2x 47, %23, %2 \n\t" -+ -+ "addi %2, %2, 128 \n\t" -+ -+ "addic. 
%1, %1, -8 \n\t" -+ "ble 2f \n\t" -+ -+ ".p2align 5 \n" -+ "1: \n\t" -+ -+ "xvmuldp 48, 40, 32 \n\t" // x0_r * alpha_r, x0_i * alpha_r -+ "xvmuldp 49, 41, 32 \n\t" -+ "xvmuldp 50, 42, 32 \n\t" -+ "xvmuldp 51, 43, 32 \n\t" -+ "xvmuldp %x3, 44, 32 \n\t" -+ "xvmuldp %x4, 45, 32 \n\t" -+ "xvmuldp %x5, 46, 32 \n\t" -+ "xvmuldp %x6, 47, 32 \n\t" -+ -+ "xxswapd %x7, 40 \n\t" -+ "xxswapd %x8, 41 \n\t" -+ "xxswapd %x9, 42 \n\t" -+ "xxswapd %x10, 43 \n\t" -+ "xxswapd %x11, 44 \n\t" -+ "xxswapd %x12, 45 \n\t" -+ "xxswapd %x13, 46 \n\t" -+ "xxswapd %x14, 47 \n\t" -+ -+ "xvmuldp %x7, %x7, 33 \n\t" // x0_i * -alpha_i, x0_r * alpha_i -+ "xvmuldp %x8, %x8, 33 \n\t" -+ -+ "lxvd2x 40, 0, %2 \n\t" // x0_r, x0_i -+ "lxvd2x 41, %17, %2 \n\t" -+ -+ "xvmuldp %x9, %x9, 33 \n\t" -+ "xvmuldp %x10, %x10, 33 \n\t" -+ -+ "lxvd2x 42, %18, %2 \n\t" -+ "lxvd2x 43, %19, %2 \n\t" -+ -+ "xvmuldp %x11, %x11, 33 \n\t" -+ "xvmuldp %x12, %x12, 33 \n\t" -+ -+ "lxvd2x 44, %20, %2 \n\t" -+ "lxvd2x 45, %21, %2 \n\t" -+ -+ "xvmuldp %x13, %x13, 33 \n\t" -+ "xvmuldp %x14, %x14, 33 \n\t" -+ -+ "lxvd2x 46, %22, %2 \n\t" -+ "lxvd2x 47, %23, %2 \n\t" -+ -+ "addi %2, %2, -128 \n\t" -+ -+ "xvadddp 48, 48, %x7 \n\t" -+ "xvadddp 49, 49, %x8 \n\t" -+ "xvadddp 50, 50, %x9 \n\t" -+ "xvadddp 51, 51, %x10 \n\t" -+ -+ "stxvd2x 48, 0, %2 \n\t" -+ "stxvd2x 49, %17, %2 \n\t" -+ -+ "xvadddp %x3, %x3, %x11 \n\t" -+ "xvadddp %x4, %x4, %x12 \n\t" -+ -+ "stxvd2x 50, %18, %2 \n\t" -+ "stxvd2x 51, %19, %2 \n\t" -+ -+ "xvadddp %x5, %x5, %x13 \n\t" -+ "xvadddp %x6, %x6, %x14 \n\t" -+ -+ "stxvd2x %x3, %20, %2 \n\t" -+ "stxvd2x %x4, %21, %2 \n\t" -+ "stxvd2x %x5, %22, %2 \n\t" -+ "stxvd2x %x6, %23, %2 \n\t" -+ -+ "addi %2, %2, 256 \n\t" -+ -+ "addic. %1, %1, -8 \n\t" -+ "bgt 1b \n" -+ -+ "2: \n\t" -+ -+ "xvmuldp 48, 40, 32 \n\t" // x0_r * alpha_r, x0_i * alpha_r -+ "xvmuldp 49, 41, 32 \n\t" -+ "xvmuldp 50, 42, 32 \n\t" -+ "xvmuldp 51, 43, 32 \n\t" -+ "xvmuldp %x3, 44, 32 \n\t" -+ "xvmuldp %x4, 45, 32 \n\t" -+ "xvmuldp %x5, 46, 32 \n\t" -+ "xvmuldp %x6, 47, 32 \n\t" -+ -+ "xxswapd %x7, 40 \n\t" -+ "xxswapd %x8, 41 \n\t" -+ "xxswapd %x9, 42 \n\t" -+ "xxswapd %x10, 43 \n\t" -+ "xxswapd %x11, 44 \n\t" -+ "xxswapd %x12, 45 \n\t" -+ "xxswapd %x13, 46 \n\t" -+ "xxswapd %x14, 47 \n\t" -+ -+ "addi %2, %2, -128 \n\t" -+ -+ "xvmuldp %x7, %x7, 33 \n\t" // x0_i * -alpha_i, x0_r * alpha_i -+ "xvmuldp %x8, %x8, 33 \n\t" -+ "xvmuldp %x9, %x9, 33 \n\t" -+ "xvmuldp %x10, %x10, 33 \n\t" -+ "xvmuldp %x11, %x11, 33 \n\t" -+ "xvmuldp %x12, %x12, 33 \n\t" -+ "xvmuldp %x13, %x13, 33 \n\t" -+ "xvmuldp %x14, %x14, 33 \n\t" -+ -+ "xvadddp 48, 48, %x7 \n\t" -+ "xvadddp 49, 49, %x8 \n\t" -+ "xvadddp 50, 50, %x9 \n\t" -+ "xvadddp 51, 51, %x10 \n\t" -+ -+ "stxvd2x 48, 0, %2 \n\t" -+ "stxvd2x 49, %17, %2 \n\t" -+ -+ "xvadddp %x3, %x3, %x11 \n\t" -+ "xvadddp %x4, %x4, %x12 \n\t" -+ -+ "stxvd2x 50, %18, %2 \n\t" -+ "stxvd2x 51, %19, %2 \n\t" -+ -+ "xvadddp %x5, %x5, %x13 \n\t" -+ "xvadddp %x6, %x6, %x14 \n\t" -+ -+ "stxvd2x %x3, %20, %2 \n\t" -+ "stxvd2x %x4, %21, %2 \n\t" -+ "stxvd2x %x5, %22, %2 \n\t" -+ "stxvd2x %x6, %23, %2 \n" -+ -+ "#n=%1 x=%0=%2 alpha=(%15,%16) o16=%17 o32=%18 o48=%19 o64=%20 o80=%21 o96=%22 o112=%23\n" -+ "#t0=%x3 t1=%x4 t2=%x5 t3=%x6 t4=%x7 t5=%x8 t6=%x9 t7=%x10 t8=%x11 t9=%x12 t10=%x13 t11=%x14" -+ : -+ "+m" (*x), -+ "+r" (n), // 1 -+ "+b" (x), // 2 -+ "=wa" (t0), // 3 -+ "=wa" (t1), // 4 -+ "=wa" (t2), // 5 -+ "=wa" (t3), // 6 -+ "=wa" (t4), // 7 -+ "=wa" (t5), // 8 -+ "=wa" (t6), // 9 -+ "=wa" (t7), // 10 -+ "=wa" (t8), // 11 -+ "=wa" (t9), // 12 -+ "=wa" (t10), // 
13 -+ "=wa" (t11) // 14 -+ : -+ "d" (alpha_r), // 15 -+ "d" (alpha_i), // 16 -+ "b" (16), // 17 -+ "b" (32), // 18 -+ "b" (48), // 19 -+ "b" (64), // 20 -+ "b" (80), // 21 -+ "b" (96), // 22 -+ "b" (112) // 23 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51" -+ ); -+} -diff --git a/kernel/power/zswap_microk_power8.c b/kernel/power/zswap_microk_power8.c -index 9e56237..54391ba 100644 ---- a/kernel/power/zswap_microk_power8.c -+++ b/kernel/power/zswap_microk_power8.c -@@ -35,146 +35,123 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - #define HAVE_KERNEL_16 1 - --static void zswap_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y) __attribute__ ((noinline)); -- --static void zswap_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y) -+static void -+zswap_kernel_16 (long n, double *x, double *y) - { -- -- -- BLASLONG i = n; -- BLASLONG o16 = 16; -- BLASLONG o32 = 32; -- BLASLONG o48 = 48; -- BLASLONG o64 = 64; -- BLASLONG o80 = 80; -- BLASLONG o96 = 96; -- BLASLONG o112 = 112; -- FLOAT *x1=x; -- FLOAT *y1=y; -- FLOAT *x2=x+1; -- FLOAT *y2=y+1; -- BLASLONG pre = 384; -- BLASLONG alpha=0; -- -- __asm__ __volatile__ -- ( -- -- "addi %3, %3, -8 \n\t" -- "addi %4, %4, -8 \n\t" -- -- ".align 5 \n\t" -- "1: \n\t" -- -- "lxvd2x 32, 0, %2 \n\t" -- "lxvd2x 33, %5, %2 \n\t" -- "lxvd2x 34, %6, %2 \n\t" -- "lxvd2x 35, %7, %2 \n\t" -- "lxvd2x 36, %8, %2 \n\t" -- "lxvd2x 37, %9, %2 \n\t" -- "lxvd2x 38, %10, %2 \n\t" -- "lxvd2x 39, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "lxvd2x 40, 0, %2 \n\t" -- "lxvd2x 41, %5, %2 \n\t" -- "lxvd2x 42, %6, %2 \n\t" -- "lxvd2x 43, %7, %2 \n\t" -- "lxvd2x 44, %8, %2 \n\t" -- "lxvd2x 45, %9, %2 \n\t" -- "lxvd2x 46, %10, %2 \n\t" -- "lxvd2x 47, %11, %2 \n\t" -- -- "addi %2, %2, 128 \n\t" -- -- "lxvd2x 48, 0, %1 \n\t" -- "lxvd2x 49, %5, %1 \n\t" -- "lxvd2x 50, %6, %1 \n\t" -- "lxvd2x 51, %7, %1 \n\t" -- "lxvd2x 52, %8, %1 \n\t" -- "lxvd2x 53, %9, %1 \n\t" -- "lxvd2x 54, %10, %1 \n\t" -- "lxvd2x 55, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "lxvd2x 56, 0, %1 \n\t" -- "lxvd2x 57, %5, %1 \n\t" -- "lxvd2x 58, %6, %1 \n\t" -- "lxvd2x 59, %7, %1 \n\t" -- "lxvd2x 60, %8, %1 \n\t" -- "lxvd2x 61, %9, %1 \n\t" -- "lxvd2x 62, %10, %1 \n\t" -- "lxvd2x 63, %11, %1 \n\t" -- -- "addi %1, %1, 128 \n\t" -- -- "stxvd2x 32, 0, %3 \n\t" -- "stxvd2x 33, %5, %3 \n\t" -- "stxvd2x 34, %6, %3 \n\t" -- "stxvd2x 35, %7, %3 \n\t" -- "stxvd2x 36, %8, %3 \n\t" -- "stxvd2x 37, %9, %3 \n\t" -- "stxvd2x 38, %10, %3 \n\t" -- "stxvd2x 39, %11, %3 \n\t" -- -- "addi %3, %3, 128 \n\t" -- -- "stxvd2x 40, 0, %3 \n\t" -- "stxvd2x 41, %5, %3 \n\t" -- "stxvd2x 42, %6, %3 \n\t" -- "stxvd2x 43, %7, %3 \n\t" -- "stxvd2x 44, %8, %3 \n\t" -- "stxvd2x 45, %9, %3 \n\t" -- "stxvd2x 46, %10, %3 \n\t" -- "stxvd2x 47, %11, %3 \n\t" -- -- "addi %3, %3, 128 \n\t" -- -- "stxvd2x 48, 0, %4 \n\t" -- "stxvd2x 49, %5, %4 \n\t" -- "stxvd2x 50, %6, %4 \n\t" -- "stxvd2x 51, %7, %4 \n\t" -- "stxvd2x 52, %8, %4 \n\t" -- "stxvd2x 53, %9, %4 \n\t" -- "stxvd2x 54, %10, %4 \n\t" -- "stxvd2x 55, %11, %4 \n\t" -- -- "addi %4, %4, 128 \n\t" -- -- "stxvd2x 56, 0, %4 \n\t" -- "stxvd2x 57, %5, %4 \n\t" -- "stxvd2x 58, %6, %4 \n\t" -- "stxvd2x 59, %7, %4 \n\t" -- "stxvd2x 60, %8, %4 \n\t" -- "stxvd2x 61, %9, %4 \n\t" -- "stxvd2x 62, %10, %4 \n\t" -- "stxvd2x 63, %11, %4 \n\t" -- -- "addi %4, %4, 128 \n\t" -- -- "addic. 
%0 , %0 , -16 \n\t" -- "bgt 1b \n\t" -- -- "2: \n\t" -- -- : -- : -- "r" (i), // 0 -- "r" (y1), // 1 -- "r" (x1), // 2 -- "r" (y2), // 3 -- "r" (x2), // 4 -- "r" (o16), // 5 -- "r" (o32), // 6 -- "r" (o48), // 7 -- "r" (o64), // 8 -- "r" (o80), // 9 -- "r" (o96), // 10 -- "r" (o112) // 11 -- : "cr0", "%0", "%2" , "%1", "%3", "%4", "memory" -- ); -- --} -- -- -+ __asm__ -+ ( -+ ".p2align 5 \n" -+ "1: \n\t" -+ "lxvd2x 32, 0, %4 \n\t" -+ "lxvd2x 33, %5, %4 \n\t" -+ "lxvd2x 34, %6, %4 \n\t" -+ "lxvd2x 35, %7, %4 \n\t" -+ "lxvd2x 36, %8, %4 \n\t" -+ "lxvd2x 37, %9, %4 \n\t" -+ "lxvd2x 38, %10, %4 \n\t" -+ "lxvd2x 39, %11, %4 \n\t" -+ -+ "addi %4, %4, 128 \n\t" -+ -+ "lxvd2x 40, 0, %4 \n\t" -+ "lxvd2x 41, %5, %4 \n\t" -+ "lxvd2x 42, %6, %4 \n\t" -+ "lxvd2x 43, %7, %4 \n\t" -+ "lxvd2x 44, %8, %4 \n\t" -+ "lxvd2x 45, %9, %4 \n\t" -+ "lxvd2x 46, %10, %4 \n\t" -+ "lxvd2x 47, %11, %4 \n\t" -+ -+ "addi %4, %4, -128 \n\t" -+ -+ "lxvd2x 48, 0, %3 \n\t" -+ "lxvd2x 49, %5, %3 \n\t" -+ "lxvd2x 50, %6, %3 \n\t" -+ "lxvd2x 51, %7, %3 \n\t" -+ "lxvd2x 0, %8, %3 \n\t" -+ "lxvd2x 1, %9, %3 \n\t" -+ "lxvd2x 2, %10, %3 \n\t" -+ "lxvd2x 3, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "lxvd2x 4, 0, %3 \n\t" -+ "lxvd2x 5, %5, %3 \n\t" -+ "lxvd2x 6, %6, %3 \n\t" -+ "lxvd2x 7, %7, %3 \n\t" -+ "lxvd2x 8, %8, %3 \n\t" -+ "lxvd2x 9, %9, %3 \n\t" -+ "lxvd2x 10, %10, %3 \n\t" -+ "lxvd2x 11, %11, %3 \n\t" -+ -+ "addi %3, %3, -128 \n\t" -+ -+ "stxvd2x 32, 0, %3 \n\t" -+ "stxvd2x 33, %5, %3 \n\t" -+ "stxvd2x 34, %6, %3 \n\t" -+ "stxvd2x 35, %7, %3 \n\t" -+ "stxvd2x 36, %8, %3 \n\t" -+ "stxvd2x 37, %9, %3 \n\t" -+ "stxvd2x 38, %10, %3 \n\t" -+ "stxvd2x 39, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "stxvd2x 40, 0, %3 \n\t" -+ "stxvd2x 41, %5, %3 \n\t" -+ "stxvd2x 42, %6, %3 \n\t" -+ "stxvd2x 43, %7, %3 \n\t" -+ "stxvd2x 44, %8, %3 \n\t" -+ "stxvd2x 45, %9, %3 \n\t" -+ "stxvd2x 46, %10, %3 \n\t" -+ "stxvd2x 47, %11, %3 \n\t" -+ -+ "addi %3, %3, 128 \n\t" -+ -+ "stxvd2x 48, 0, %4 \n\t" -+ "stxvd2x 49, %5, %4 \n\t" -+ "stxvd2x 50, %6, %4 \n\t" -+ "stxvd2x 51, %7, %4 \n\t" -+ "stxvd2x 0, %8, %4 \n\t" -+ "stxvd2x 1, %9, %4 \n\t" -+ "stxvd2x 2, %10, %4 \n\t" -+ "stxvd2x 3, %11, %4 \n\t" -+ -+ "addi %4, %4, 128 \n\t" -+ -+ "stxvd2x 4, 0, %4 \n\t" -+ "stxvd2x 5, %5, %4 \n\t" -+ "stxvd2x 6, %6, %4 \n\t" -+ "stxvd2x 7, %7, %4 \n\t" -+ "stxvd2x 8, %8, %4 \n\t" -+ "stxvd2x 9, %9, %4 \n\t" -+ "stxvd2x 10, %10, %4 \n\t" -+ "stxvd2x 11, %11, %4 \n\t" -+ -+ "addi %4, %4, 128 \n\t" -+ "addic. %2, %2, -16 \n\t" -+ "bgt 1b \n" -+ -+ "#n=%2 x=%0=%3 y=%1=%4 o16=%5 o32=%6 o48=%7 o64=%8 o80=%9 o96=%10 o112=%11" -+ : -+ "+m" (*x), -+ "+m" (*y), -+ "+r" (n), // 2 -+ "+b" (x), // 3 -+ "+b" (y) // 4 -+ : -+ "b" (16), // 5 -+ "b" (32), // 6 -+ "b" (48), // 7 -+ "b" (64), // 8 -+ "b" (80), // 9 -+ "b" (96), // 10 -+ "b" (112) // 11 -+ : -+ "cr0", -+ "vs32","vs33","vs34","vs35","vs36","vs37","vs38","vs39", -+ "vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47", -+ "vs48","vs49","vs50","vs51","vs0","vs1","vs2","vs3", -+ "vs4","vs5","vs6","vs7","vs8","vs9","vs10","vs11" -+ ); -+} diff --git a/openblas-0.2.19-s390x.patch b/openblas-0.2.19-s390x.patch deleted file mode 100644 index 623637b..0000000 --- a/openblas-0.2.19-s390x.patch +++ /dev/null @@ -1,4152 +0,0 @@ -From c4b61f74f18c674c69301122ba95bdbca6f55d0f Mon Sep 17 00:00:00 2001 -From: Zhang Xianyi -Date: Fri, 15 Apr 2016 18:02:24 -0400 -Subject: [PATCH 1/6] Init IBM z system (s390x) porting. 
- -(cherry picked from commit dd43661cfd5d3de6e9fe804587b89f1094c85e41) ---- - Makefile.zarch | 6 ++ - c_check | 8 +++ - common.h | 4 ++ - common_linux.h | 4 +- - common_zarch.h | 139 ++++++++++++++++++++++++++++++++++++++ - cpuid_zarch.c | 91 +++++++++++++++++++++++++ - ctest.c | 4 ++ - getarch.c | 10 ++- - kernel/zarch/KERNEL | 30 ++++++++ - kernel/zarch/KERNEL.ZARCH_GENERIC | 134 ++++++++++++++++++++++++++++++++++++ - kernel/zarch/Makefile | 2 + - param.h | 39 +++++++++++ - 12 files changed, 467 insertions(+), 4 deletions(-) - create mode 100644 Makefile.zarch - create mode 100644 common_zarch.h - create mode 100644 cpuid_zarch.c - create mode 100644 kernel/zarch/KERNEL - create mode 100644 kernel/zarch/KERNEL.ZARCH_GENERIC - create mode 100644 kernel/zarch/Makefile - -diff --git a/Makefile.zarch b/Makefile.zarch -new file mode 100644 -index 00000000..138c5941 ---- /dev/null -+++ b/Makefile.zarch -@@ -0,0 +1,6 @@ -+ -+ifeq ($(CORE), Z13) -+CCOMMON_OPT += -march=z13 -+FCOMMON_OPT += -march=z13 -+endif -+ -diff --git a/c_check b/c_check -index 2ec9fc48..1bd52201 100644 ---- a/c_check -+++ b/c_check -@@ -10,6 +10,7 @@ $hostarch = "x86_64" if ($hostarch eq "amd64"); - $hostarch = "arm" if ($hostarch =~ /^arm.*/); - $hostarch = "arm64" if ($hostarch eq "aarch64"); - $hostarch = "power" if ($hostarch =~ /^(powerpc|ppc).*/); -+$hostarch = "zarch" if ($hostarch eq "s390x"); - - $tmpf = new File::Temp( UNLINK => 1 ); - $binary = $ENV{"BINARY"}; -@@ -72,6 +73,7 @@ $architecture = sparc if ($data =~ /ARCH_SPARC/); - $architecture = ia64 if ($data =~ /ARCH_IA64/); - $architecture = arm if ($data =~ /ARCH_ARM/); - $architecture = arm64 if ($data =~ /ARCH_ARM64/); -+$architecture = zarch if ($data =~ /ARCH_ZARCH/); - - $defined = 0; - -@@ -96,6 +98,11 @@ if (($architecture eq "arm") || ($architecture eq "arm64")) { - $defined = 1; - } - -+if ($architecture eq "zarch") { -+ $defined = 1; -+ $binary = 64; -+} -+ - if ($architecture eq "alpha") { - $defined = 1; - $binary = 64; -@@ -187,6 +194,7 @@ $architecture = sparc if ($data =~ /ARCH_SPARC/); - $architecture = ia64 if ($data =~ /ARCH_IA64/); - $architecture = arm if ($data =~ /ARCH_ARM/); - $architecture = arm64 if ($data =~ /ARCH_ARM64/); -+$architecture = zarch if ($data =~ /ARCH_ZARCH/); - - $binformat = bin32; - $binformat = bin64 if ($data =~ /BINARY_64/); -diff --git a/common.h b/common.h -index 480174c1..b4acada3 100644 ---- a/common.h -+++ b/common.h -@@ -420,6 +420,10 @@ please https://github.com/xianyi/OpenBLAS/issues/246 - #include "common_arm64.h" - #endif - -+#ifdef ARCH_ZARCH -+#include "common_zarch.h" -+#endif -+ - #ifndef ASSEMBLER - #ifdef OS_WINDOWS - typedef char env_var_t[MAX_PATH]; -diff --git a/common_linux.h b/common_linux.h -index cab5e5f7..35f3fb65 100644 ---- a/common_linux.h -+++ b/common_linux.h -@@ -70,7 +70,7 @@ extern long int syscall (long int __sysno, ...); - static inline int my_mbind(void *addr, unsigned long len, int mode, - unsigned long *nodemask, unsigned long maxnode, - unsigned flags) { --#if defined (__LSB_VERSION__) -+#if defined (__LSB_VERSION__) || defined(ARCH_ZARCH) - // So far, LSB (Linux Standard Base) don't support syscall(). 
- // https://lsbbugs.linuxfoundation.org/show_bug.cgi?id=3482 - return 0; -@@ -90,7 +90,7 @@ static inline int my_mbind(void *addr, unsigned long len, int mode, - } - - static inline int my_set_mempolicy(int mode, const unsigned long *addr, unsigned long flag) { --#if defined (__LSB_VERSION__) -+#if defined (__LSB_VERSION__) || defined(ARCH_ZARCH) - // So far, LSB (Linux Standard Base) don't support syscall(). - // https://lsbbugs.linuxfoundation.org/show_bug.cgi?id=3482 - return 0; -diff --git a/common_zarch.h b/common_zarch.h -new file mode 100644 -index 00000000..7c04cf42 ---- /dev/null -+++ b/common_zarch.h -@@ -0,0 +1,139 @@ -+/***************************************************************************** -+Copyright (c) 2011-2016, The OpenBLAS Project -+All rights reserved. -+ -+Redistribution and use in source and binary forms, with or without -+modification, are permitted provided that the following conditions are -+met: -+ -+ 1. Redistributions of source code must retain the above copyright -+ notice, this list of conditions and the following disclaimer. -+ -+ 2. Redistributions in binary form must reproduce the above copyright -+ notice, this list of conditions and the following disclaimer in -+ the documentation and/or other materials provided with the -+ distribution. -+ 3. Neither the name of the OpenBLAS project nor the names of -+ its contributors may be used to endorse or promote products -+ derived from this software without specific prior written -+ permission. -+ -+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+**********************************************************************************/ -+ -+#ifndef COMMON_ZARCH -+#define COMMON_ZARCH -+ -+#define MB -+//__asm__ __volatile__ ("dmb ish" : : : "memory") -+#define WMB -+//__asm__ __volatile__ ("dmb ishst" : : : "memory") -+ -+ -+#define INLINE inline -+ -+#define RETURN_BY_COMPLEX -+ -+#ifndef ASSEMBLER -+ -+ /* -+static void __inline blas_lock(volatile BLASULONG *address){ -+ -+ BLASULONG ret; -+ -+ do { -+ while (*address) {YIELDING;}; -+ -+ __asm__ __volatile__( -+ "mov x4, #1 \n\t" -+ "1: \n\t" -+ "ldaxr x2, [%1] \n\t" -+ "cbnz x2, 1b \n\t" -+ "2: \n\t" -+ "stxr w3, x4, [%1] \n\t" -+ "cbnz w3, 1b \n\t" -+ "mov %0, #0 \n\t" -+ : "=r"(ret), "=r"(address) -+ : "1"(address) -+ : "memory", "x2" , "x3", "x4" -+ -+ -+ ); -+ -+ -+ } while (ret); -+ -+} -+ */ -+//#define BLAS_LOCK_DEFINED -+ -+ -+ -+static inline int blas_quickdivide(blasint x, blasint y){ -+ return x / y; -+} -+ -+#if defined(DOUBLE) -+#define GET_IMAGE(res) __asm__ __volatile__("str d1, %0" : "=m"(res) : : "memory") -+#else -+#define GET_IMAGE(res) __asm__ __volatile__("str s1, %0" : "=m"(res) : : "memory") -+#endif -+ -+#define GET_IMAGE_CANCEL -+ -+#endif -+ -+ -+#ifndef F_INTERFACE -+#define REALNAME ASMNAME -+#else -+#define REALNAME ASMFNAME -+#endif -+ -+#if defined(ASSEMBLER) && !defined(NEEDPARAM) -+ -+#define PROLOGUE \ -+ .text ;\ -+ .align 4 ;\ -+ .global REALNAME ;\ -+ .type REALNAME, %function ;\ -+REALNAME: -+ -+#define EPILOGUE -+ -+#define PROFCODE -+ -+#endif -+ -+ -+#define SEEK_ADDRESS -+ -+#ifndef PAGESIZE -+#define PAGESIZE ( 4 << 10) -+#endif -+#define HUGE_PAGESIZE ( 4 << 20) -+ -+#if defined(CORTEXA57) -+#define BUFFER_SIZE (20 << 20) -+#else -+#define BUFFER_SIZE (16 << 20) -+#endif -+ -+ -+#define BASE_ADDRESS (START_ADDRESS - BUFFER_SIZE * MAX_CPU_NUMBER) -+ -+#ifndef MAP_ANONYMOUS -+#define MAP_ANONYMOUS MAP_ANON -+#endif -+ -+#endif -+ -diff --git a/cpuid_zarch.c b/cpuid_zarch.c -new file mode 100644 -index 00000000..248cd47e ---- /dev/null -+++ b/cpuid_zarch.c -@@ -0,0 +1,91 @@ -+/************************************************************************** -+ Copyright (c) 2016, The OpenBLAS Project -+ All rights reserved. -+ Redistribution and use in source and binary forms, with or without -+ modification, are permitted provided that the following conditions are -+ met: -+ 1. Redistributions of source code must retain the above copyright -+ notice, this list of conditions and the following disclaimer. -+ 2. Redistributions in binary form must reproduce the above copyright -+ notice, this list of conditions and the following disclaimer in -+ the documentation and/or other materials provided with the -+ distribution. -+ 3. Neither the name of the OpenBLAS project nor the names of -+ its contributors may be used to endorse or promote products -+ derived from this software without specific prior written permission. -+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE -+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -+ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ *****************************************************************************/ -+ -+#include -+ -+#define CPU_GENERIC 0 -+#define CPU_Z13 1 -+ -+static char *cpuname[] = { -+ "ZARCH_GENERIC", -+ "Z13" -+}; -+ -+static char *cpuname_lower[] = { -+ "zarch_generic", -+ "z13" -+}; -+ -+int detect(void) -+{ -+ return CPU_GENERIC; -+} -+ -+void get_libname(void) -+{ -+ -+ int d = detect(); -+ printf("%s", cpuname_lower[d]); -+} -+ -+char *get_corename(void) -+{ -+ return cpuname[detect()]; -+} -+ -+void get_architecture(void) -+{ -+ printf("ZARCH"); -+} -+ -+void get_subarchitecture(void) -+{ -+ int d = detect(); -+ printf("%s", cpuname[d]); -+} -+ -+void get_subdirname(void) -+{ -+ printf("zarch"); -+} -+ -+ -+void get_cpuconfig(void) -+{ -+ -+ int d = detect(); -+ switch (d){ -+ case CPU_GENERIC: -+ printf("#define ZARCH_GENERIC\n"); -+ printf("#define DTB_DEFAULT_ENTRIES 64\n"); -+ break; -+ case CPU_Z13: -+ printf("#define Z13\n"); -+ printf("#define DTB_DEFAULT_ENTRIES 64\n"); -+ break; -+ } -+} -diff --git a/ctest.c b/ctest.c -index e0ef46e6..27d3b473 100644 ---- a/ctest.c -+++ b/ctest.c -@@ -105,6 +105,10 @@ ARCH_X86_64 - ARCH_POWER - #endif - -+#if defined(__s390x__) || defined(__zarch__) -+ARCH_ZARCH -+#endif -+ - #ifdef __mips64 - ARCH_MIPS64 - #endif -diff --git a/getarch.c b/getarch.c -index f8069e50..0d810e6c 100644 ---- a/getarch.c -+++ b/getarch.c -@@ -907,6 +907,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- #define OPENBLAS_SUPPORTED - #endif - -+#if defined(__zarch__) || defined(__s390x__) -+#define ZARCH -+#include "cpuid_zarch.c" -+#define OPENBLAS_SUPPORTED -+#endif -+ - #ifdef INTEL_AMD - #include "cpuid_x86.c" - #define OPENBLAS_SUPPORTED -@@ -1006,7 +1012,7 @@ int main(int argc, char *argv[]){ - #ifdef FORCE - printf("CORE=%s\n", CORENAME); - #else --#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) -+#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) || defined(ZARCH) - printf("CORE=%s\n", get_corename()); - #endif - #endif -@@ -1113,7 +1119,7 @@ int main(int argc, char *argv[]){ - #ifdef FORCE - printf("#define CHAR_CORENAME \"%s\"\n", CORENAME); - #else --#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) -+#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) || defined(ZARCH) - printf("#define CHAR_CORENAME \"%s\"\n", get_corename()); - #endif - #endif -diff --git a/kernel/zarch/KERNEL b/kernel/zarch/KERNEL -new file mode 100644 -index 00000000..68d68b5f ---- /dev/null -+++ b/kernel/zarch/KERNEL -@@ -0,0 +1,30 @@ -+ifndef SCABS_KERNEL -+SCABS_KERNEL = ../generic/cabs.c -+endif -+ -+ifndef DCABS_KERNEL -+DCABS_KERNEL = ../generic/cabs.c -+endif -+ -+ifndef QCABS_KERNEL -+QCABS_KERNEL = ../generic/cabs.c -+endif -+ -+ifndef LSAME_KERNEL -+LSAME_KERNEL = ../generic/lsame.c -+endif -+ -+ifndef SGEMM_BETA -+SGEMM_BETA = ../generic/gemm_beta.c -+endif -+ifndef DGEMM_BETA -+DGEMM_BETA = ../generic/gemm_beta.c -+endif -+ifndef CGEMM_BETA -+CGEMM_BETA = ../generic/zgemm_beta.c -+endif -+ifndef ZGEMM_BETA -+ZGEMM_BETA = ../generic/zgemm_beta.c -+endif -+ -+ -diff --git a/kernel/zarch/KERNEL.ZARCH_GENERIC b/kernel/zarch/KERNEL.ZARCH_GENERIC -new file mode 100644 -index 00000000..27157dad ---- /dev/null -+++ b/kernel/zarch/KERNEL.ZARCH_GENERIC -@@ -0,0 +1,134 @@ -+SAMAXKERNEL = ../arm/amax.c -+DAMAXKERNEL = ../arm/amax.c -+CAMAXKERNEL = ../arm/zamax.c -+ZAMAXKERNEL = ../arm/zamax.c -+ -+SAMINKERNEL = ../arm/amin.c -+DAMINKERNEL = ../arm/amin.c -+CAMINKERNEL = ../arm/zamin.c -+ZAMINKERNEL = ../arm/zamin.c -+ -+SMAXKERNEL = ../arm/max.c -+DMAXKERNEL = ../arm/max.c -+ -+SMINKERNEL = ../arm/min.c -+DMINKERNEL = ../arm/min.c -+ -+ISAMAXKERNEL = ../arm/iamax.c -+IDAMAXKERNEL = ../arm/iamax.c -+ICAMAXKERNEL = ../arm/izamax.c -+IZAMAXKERNEL = ../arm/izamax.c -+ -+ISAMINKERNEL = ../arm/iamin.c -+IDAMINKERNEL = ../arm/iamin.c -+ICAMINKERNEL = ../arm/izamin.c -+IZAMINKERNEL = ../arm/izamin.c -+ -+ISMAXKERNEL = ../arm/imax.c -+IDMAXKERNEL = ../arm/imax.c -+ -+ISMINKERNEL = ../arm/imin.c -+IDMINKERNEL = ../arm/imin.c -+ -+SASUMKERNEL = ../arm/asum.c -+DASUMKERNEL = ../arm/asum.c -+CASUMKERNEL = ../arm/zasum.c -+ZASUMKERNEL = ../arm/zasum.c -+ -+SAXPYKERNEL = ../arm/axpy.c -+DAXPYKERNEL = ../arm/axpy.c -+CAXPYKERNEL = ../arm/zaxpy.c -+ZAXPYKERNEL = ../arm/zaxpy.c -+ -+SCOPYKERNEL = ../arm/copy.c -+DCOPYKERNEL = ../arm/copy.c -+CCOPYKERNEL = ../arm/zcopy.c -+ZCOPYKERNEL = ../arm/zcopy.c -+ -+SDOTKERNEL = ../arm/dot.c -+DDOTKERNEL = ../arm/dot.c -+CDOTKERNEL = ../arm/zdot.c -+ZDOTKERNEL = ../arm/zdot.c -+ -+SNRM2KERNEL = ../arm/nrm2.c -+DNRM2KERNEL = ../arm/nrm2.c -+CNRM2KERNEL = ../arm/znrm2.c -+ZNRM2KERNEL = ../arm/znrm2.c -+ -+SROTKERNEL = ../arm/rot.c -+DROTKERNEL = ../arm/rot.c -+CROTKERNEL = ../arm/zrot.c -+ZROTKERNEL = ../arm/zrot.c -+ -+SSCALKERNEL = ../arm/scal.c -+DSCALKERNEL = 
../arm/scal.c -+CSCALKERNEL = ../arm/zscal.c -+ZSCALKERNEL = ../arm/zscal.c -+ -+SSWAPKERNEL = ../arm/swap.c -+DSWAPKERNEL = ../arm/swap.c -+CSWAPKERNEL = ../arm/zswap.c -+ZSWAPKERNEL = ../arm/zswap.c -+ -+SGEMVNKERNEL = ../arm/gemv_n.c -+DGEMVNKERNEL = ../arm/gemv_n.c -+CGEMVNKERNEL = ../arm/zgemv_n.c -+ZGEMVNKERNEL = ../arm/zgemv_n.c -+ -+SGEMVTKERNEL = ../arm/gemv_t.c -+DGEMVTKERNEL = ../arm/gemv_t.c -+CGEMVTKERNEL = ../arm/zgemv_t.c -+ZGEMVTKERNEL = ../arm/zgemv_t.c -+ -+STRMMKERNEL = ../generic/trmmkernel_2x2.c -+DTRMMKERNEL = ../generic/trmmkernel_2x2.c -+CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c -+ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c -+ -+SGEMMKERNEL = ../generic/gemmkernel_2x2.c -+SGEMMONCOPY = ../generic/gemm_ncopy_2.c -+SGEMMOTCOPY = ../generic/gemm_tcopy_2.c -+SGEMMONCOPYOBJ = sgemm_oncopy.o -+SGEMMOTCOPYOBJ = sgemm_otcopy.o -+ -+DGEMMKERNEL = ../generic/gemmkernel_2x2.c -+DGEMMONCOPY = ../generic/gemm_ncopy_2.c -+DGEMMOTCOPY = ../generic/gemm_tcopy_2.c -+DGEMMONCOPYOBJ = dgemm_oncopy.o -+DGEMMOTCOPYOBJ = dgemm_otcopy.o -+ -+CGEMMKERNEL = ../generic/zgemmkernel_2x2.c -+CGEMMONCOPY = ../generic/zgemm_ncopy_2.c -+CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -+CGEMMONCOPYOBJ = cgemm_oncopy.o -+CGEMMOTCOPYOBJ = cgemm_otcopy.o -+ -+ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c -+ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c -+ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -+ZGEMMONCOPYOBJ = zgemm_oncopy.o -+ZGEMMOTCOPYOBJ = zgemm_otcopy.o -+ -+STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -+STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -+STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -+STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -+ -+DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -+DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -+DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -+DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -+ -+CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -+CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -+CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -+CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -+ -+ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -+ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -+ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -+ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -+ -+ -+ -+ -diff --git a/kernel/zarch/Makefile b/kernel/zarch/Makefile -new file mode 100644 -index 00000000..efae70d7 ---- /dev/null -+++ b/kernel/zarch/Makefile -@@ -0,0 +1,2 @@ -+clean :: -+ -diff --git a/param.h b/param.h -index 480518cd..0268fb5e 100644 ---- a/param.h -+++ b/param.h -@@ -2509,6 +2509,45 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- #endif - - -+#if defined(ZARCH_GENERIC) -+#define SNUMOPT 2 -+#define DNUMOPT 2 -+ -+#define GEMM_DEFAULT_OFFSET_A 0 -+#define GEMM_DEFAULT_OFFSET_B 0 -+#define GEMM_DEFAULT_ALIGN 0x03fffUL -+ -+#define SGEMM_DEFAULT_UNROLL_M 2 -+#define SGEMM_DEFAULT_UNROLL_N 2 -+ -+#define DGEMM_DEFAULT_UNROLL_M 2 -+#define DGEMM_DEFAULT_UNROLL_N 2 -+ -+#define CGEMM_DEFAULT_UNROLL_M 2 -+#define CGEMM_DEFAULT_UNROLL_N 2 -+ -+#define ZGEMM_DEFAULT_UNROLL_M 2 -+#define ZGEMM_DEFAULT_UNROLL_N 2 -+ -+#define SGEMM_DEFAULT_P 128 -+#define DGEMM_DEFAULT_P 128 -+#define CGEMM_DEFAULT_P 96 -+#define ZGEMM_DEFAULT_P 64 -+ -+#define SGEMM_DEFAULT_Q 240 -+#define DGEMM_DEFAULT_Q 120 -+#define CGEMM_DEFAULT_Q 120 -+#define ZGEMM_DEFAULT_Q 120 -+ -+#define SGEMM_DEFAULT_R 12288 -+#define DGEMM_DEFAULT_R 8192 -+#define CGEMM_DEFAULT_R 4096 -+#define ZGEMM_DEFAULT_R 4096 -+ -+ -+#define SYMV_P 16 -+#endif -+ - - #ifdef GENERIC - --- -2.12.2 - - -From f18efc365072feaedc5730b1a0153ab505b8deaa Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Dan=20Hor=C3=A1k?= -Date: Thu, 13 Apr 2017 12:21:10 +0200 -Subject: [PATCH 2/6] add lapack laswp for zarch - -(cherry picked from commit 56762d5e4c54428ef20e14610f1535a74e5ac701) ---- - lapack/laswp/zarch/Makefile | 8 ++++++++ - 1 file changed, 8 insertions(+) - create mode 100644 lapack/laswp/zarch/Makefile - -diff --git a/lapack/laswp/zarch/Makefile b/lapack/laswp/zarch/Makefile -new file mode 100644 -index 00000000..af1f0199 ---- /dev/null -+++ b/lapack/laswp/zarch/Makefile -@@ -0,0 +1,8 @@ -+TOPDIR = ../../.. -+include ../../../Makefile.system -+ -+LASWP = ../generic/laswp_k_1.c -+ZLASWP = ../generic/zlaswp_k_1.c -+ -+include ../generic/Makefile -+ --- -2.12.2 - - -From d105ac97e1ad4455a76a7929a04a43267daa1191 Mon Sep 17 00:00:00 2001 -From: Abdurrauf -Date: Wed, 4 Jan 2017 19:32:33 +0400 -Subject: [PATCH 3/6] dtrmm and dgemm for z13 - -(cherry picked from commit 64186678180c08db3f43524082790394a00c5008) ---- - CONTRIBUTORS.md | 4 + - Makefile.zarch | 4 +- - README.md | 5 + - common_zarch.h | 3 +- - cpuid_zarch.c | 4 +- - kernel/zarch/KERNEL.Z13 | 141 ++++ - kernel/zarch/KERNEL.ZARCH_GENERIC | 1 - - kernel/zarch/gemm8x4V.S | 615 +++++++++++++++ - kernel/zarch/kernelMacros.S | 1529 +++++++++++++++++++++++++++++++++++++ - kernel/zarch/trmm8x4V.S | 877 +++++++++++++++++++++ - param.h | 40 + - 11 files changed, 3218 insertions(+), 5 deletions(-) - create mode 100644 kernel/zarch/KERNEL.Z13 - create mode 100644 kernel/zarch/gemm8x4V.S - create mode 100644 kernel/zarch/kernelMacros.S - create mode 100644 kernel/zarch/trmm8x4V.S - -diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md -index 5ecf32b9..0489599a 100644 ---- a/CONTRIBUTORS.md -+++ b/CONTRIBUTORS.md -@@ -161,3 +161,7 @@ In chronological order: - * Kaustubh Raste - * [2016-05-09] DTRSM optimization for MIPS P5600 and I6400 using MSA - * [2016-05-20] STRSM optimization for MIPS P5600 and I6400 using MSA -+ -+* Abdelrauf -+ * [2017-01-01] dgemm and dtrmm kernels for IBM z13 -+ -diff --git a/Makefile.zarch b/Makefile.zarch -index 138c5941..9ec9dc79 100644 ---- a/Makefile.zarch -+++ b/Makefile.zarch -@@ -1,6 +1,6 @@ - - ifeq ($(CORE), Z13) --CCOMMON_OPT += -march=z13 --FCOMMON_OPT += -march=z13 -+CCOMMON_OPT += -march=z13 -mzvector -+FCOMMON_OPT += -march=z13 -mzvector - endif - -diff --git a/README.md b/README.md -index ff55edaa..5428f0eb 100644 ---- a/README.md -+++ b/README.md -@@ -106,6 +106,11 @@ Please read GotoBLAS_01Readme.txt - - **ARMV8**: Experimental - - **ARM Cortex-A57**: Experimental - -+#### IBM zEnterprise System: -+- 
**Z13**: Double precision real number -+ git checkout z13 -+ make USE_TRMM=1 -+ - ### Support OS: - - **GNU/Linux** - - **MingWin or Visual Studio(CMake)/Windows**: Please read . -diff --git a/common_zarch.h b/common_zarch.h -index 7c04cf42..e105574e 100644 ---- a/common_zarch.h -+++ b/common_zarch.h -@@ -103,10 +103,11 @@ static inline int blas_quickdivide(blasint x, blasint y){ - - #define PROLOGUE \ - .text ;\ -- .align 4 ;\ -+ .align 256 ;\ - .global REALNAME ;\ - .type REALNAME, %function ;\ - REALNAME: -+ - - #define EPILOGUE - -diff --git a/cpuid_zarch.c b/cpuid_zarch.c -index 248cd47e..e2e3b046 100644 ---- a/cpuid_zarch.c -+++ b/cpuid_zarch.c -@@ -42,7 +42,9 @@ static char *cpuname_lower[] = { - - int detect(void) - { -- return CPU_GENERIC; -+ // return CPU_GENERIC; -+ return CPU_Z13; -+ - } - - void get_libname(void) -diff --git a/kernel/zarch/KERNEL.Z13 b/kernel/zarch/KERNEL.Z13 -new file mode 100644 -index 00000000..91885da8 ---- /dev/null -+++ b/kernel/zarch/KERNEL.Z13 -@@ -0,0 +1,141 @@ -+SAMAXKERNEL = ../arm/amax.c -+DAMAXKERNEL = ../arm/amax.c -+CAMAXKERNEL = ../arm/zamax.c -+ZAMAXKERNEL = ../arm/zamax.c -+ -+SAMINKERNEL = ../arm/amin.c -+DAMINKERNEL = ../arm/amin.c -+CAMINKERNEL = ../arm/zamin.c -+ZAMINKERNEL = ../arm/zamin.c -+ -+SMAXKERNEL = ../arm/max.c -+DMAXKERNEL = ../arm/max.c -+ -+SMINKERNEL = ../arm/min.c -+DMINKERNEL = ../arm/min.c -+ -+ISAMAXKERNEL = ../arm/iamax.c -+IDAMAXKERNEL = ../arm/iamax.c -+ICAMAXKERNEL = ../arm/izamax.c -+IZAMAXKERNEL = ../arm/izamax.c -+ -+ISAMINKERNEL = ../arm/iamin.c -+IDAMINKERNEL = ../arm/iamin.c -+ICAMINKERNEL = ../arm/izamin.c -+IZAMINKERNEL = ../arm/izamin.c -+ -+ISMAXKERNEL = ../arm/imax.c -+IDMAXKERNEL = ../arm/imax.c -+ -+ISMINKERNEL = ../arm/imin.c -+IDMINKERNEL = ../arm/imin.c -+ -+SASUMKERNEL = ../arm/asum.c -+DASUMKERNEL = ../arm/asum.c -+CASUMKERNEL = ../arm/zasum.c -+ZASUMKERNEL = ../arm/zasum.c -+ -+SAXPYKERNEL = ../arm/axpy.c -+DAXPYKERNEL = ../arm/axpy.c -+CAXPYKERNEL = ../arm/zaxpy.c -+ZAXPYKERNEL = ../arm/zaxpy.c -+ -+SCOPYKERNEL = ../arm/copy.c -+DCOPYKERNEL = ../arm/copy.c -+CCOPYKERNEL = ../arm/zcopy.c -+ZCOPYKERNEL = ../arm/zcopy.c -+ -+SDOTKERNEL = ../arm/dot.c -+DDOTKERNEL = ../arm/dot.c -+CDOTKERNEL = ../arm/zdot.c -+ZDOTKERNEL = ../arm/zdot.c -+ -+SNRM2KERNEL = ../arm/nrm2.c -+DNRM2KERNEL = ../arm/nrm2.c -+CNRM2KERNEL = ../arm/znrm2.c -+ZNRM2KERNEL = ../arm/znrm2.c -+ -+SROTKERNEL = ../arm/rot.c -+DROTKERNEL = ../arm/rot.c -+CROTKERNEL = ../arm/zrot.c -+ZROTKERNEL = ../arm/zrot.c -+ -+SSCALKERNEL = ../arm/scal.c -+DSCALKERNEL = ../arm/scal.c -+CSCALKERNEL = ../arm/zscal.c -+ZSCALKERNEL = ../arm/zscal.c -+ -+SSWAPKERNEL = ../arm/swap.c -+DSWAPKERNEL = ../arm/swap.c -+CSWAPKERNEL = ../arm/zswap.c -+ZSWAPKERNEL = ../arm/zswap.c -+ -+SGEMVNKERNEL = ../arm/gemv_n.c -+DGEMVNKERNEL = ../arm/gemv_n.c -+CGEMVNKERNEL = ../arm/zgemv_n.c -+ZGEMVNKERNEL = ../arm/zgemv_n.c -+ -+SGEMVTKERNEL = ../arm/gemv_t.c -+DGEMVTKERNEL = ../arm/gemv_t.c -+CGEMVTKERNEL = ../arm/zgemv_t.c -+ZGEMVTKERNEL = ../arm/zgemv_t.c -+ -+STRMMKERNEL = ../generic/trmmkernel_2x2.c -+DTRMMKERNEL = trmm8x4V.S -+CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c -+ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c -+ -+SGEMMKERNEL = ../generic/gemmkernel_2x2.c -+SGEMMONCOPY = ../generic/gemm_ncopy_2.c -+SGEMMOTCOPY = ../generic/gemm_tcopy_2.c -+SGEMMONCOPYOBJ = sgemm_oncopy.o -+SGEMMOTCOPYOBJ = sgemm_otcopy.o -+ -+ -+ -+DGEMMKERNEL = gemm8x4V.S -+DGEMMINCOPY = ../generic/gemm_ncopy_8.c -+DGEMMITCOPY = ../generic/gemm_tcopy_8.c -+DGEMMONCOPY = ../generic/gemm_ncopy_4.c 
-+DGEMMOTCOPY = ../generic/gemm_tcopy_4.c -+DGEMMINCOPYOBJ = dgemm_incopy.o -+DGEMMITCOPYOBJ = dgemm_itcopy.o -+DGEMMONCOPYOBJ = dgemm_oncopy.o -+DGEMMOTCOPYOBJ = dgemm_otcopy.o -+ -+CGEMMKERNEL = ../generic/zgemmkernel_2x2.c -+CGEMMONCOPY = ../generic/zgemm_ncopy_2.c -+CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -+CGEMMONCOPYOBJ = cgemm_oncopy.o -+CGEMMOTCOPYOBJ = cgemm_otcopy.o -+ -+ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c -+ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c -+ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -+ZGEMMONCOPYOBJ = zgemm_oncopy.o -+ZGEMMOTCOPYOBJ = zgemm_otcopy.o -+ -+STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -+STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -+STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -+STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -+ -+DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -+DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -+DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -+DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -+ -+CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -+CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -+CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -+CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -+ -+ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c -+ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c -+ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c -+ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -+ -+ -+ -+ -+ -diff --git a/kernel/zarch/KERNEL.ZARCH_GENERIC b/kernel/zarch/KERNEL.ZARCH_GENERIC -index 27157dad..d80f84e7 100644 ---- a/kernel/zarch/KERNEL.ZARCH_GENERIC -+++ b/kernel/zarch/KERNEL.ZARCH_GENERIC -@@ -131,4 +131,3 @@ ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c - - - -- -diff --git a/kernel/zarch/gemm8x4V.S b/kernel/zarch/gemm8x4V.S -new file mode 100644 -index 00000000..0b4bc73c ---- /dev/null -+++ b/kernel/zarch/gemm8x4V.S -@@ -0,0 +1,615 @@ -+/*************************************************************************** -+Copyright (c) 2013-2017, The OpenBLAS Project -+All rights reserved. -+Redistribution and use in source and binary forms, with or without -+modification, are permitted provided that the following conditions are -+met: -+1. Redistributions of source code must retain the above copyright -+notice, this list of conditions and the following disclaimer. -+2. Redistributions in binary form must reproduce the above copyright -+notice, this list of conditions and the following disclaimer in -+the documentation and/or other materials provided with the -+distribution. -+3. Neither the name of the OpenBLAS project nor the names of -+its contributors may be used to endorse or promote products -+derived from this software without specific prior written permission. -+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE -+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+*****************************************************************************/ -+ -+/************************************************************************************** -+* 2017/01/01 AbdelRauf (quickwritereader@gmail.com) -+* BLASTEST : OK -+* CTEST : OK -+* TEST : OK -+**************************************************************************************/ -+ -+/*********************************************************************/ -+/* Copyright 2009, 2010 The University of Texas at Austin. */ -+/* All rights reserved. */ -+/* */ -+/* Redistribution and use in source and binary forms, with or */ -+/* without modification, are permitted provided that the following */ -+/* conditions are met: */ -+/* */ -+/* 1. Redistributions of source code must retain the above */ -+/* copyright notice, this list of conditions and the following */ -+/* disclaimer. */ -+/* */ -+/* 2. Redistributions in binary form must reproduce the above */ -+/* copyright notice, this list of conditions and the following */ -+/* disclaimer in the documentation and/or other materials */ -+/* provided with the distribution. */ -+/* */ -+/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ -+/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ -+/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ -+/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ -+/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ -+/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ -+/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ -+/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ -+/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ -+/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ -+/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ -+/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ -+/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ -+/* POSSIBILITY OF SUCH DAMAGE. */ -+/* */ -+/* The views and conclusions contained in the software and */ -+/* documentation are those of the authors and should not be */ -+/* interpreted as representing official policies, either expressed */ -+/* or implied, of The University of Texas at Austin. */ -+/*********************************************************************/ -+ -+#define ASSEMBLER -+#include "common.h" -+ -+/************** Notes ON IBM abi and IBM assembly********************************************** -+* General registers r0 and r1 should be used internally whenever possible -+* General registers r2 to r5 should be second choice -+* General registers r12 to r15 should only be used for their standard function. 
-+* r0 should not be used as address disp register -+ -+#BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc -+ ##bm=r2,bn=r3, bk=r4, alpha=f0,ba=r5,bb=r6,stack[160] ,ldc=stack[168] -+**********************************************************************************************/ -+ -+ -+#define BM %r2 -+#define BM_CUR %r0 -+#define BN %r3 -+#define BN_CUR %r10 -+#define BK %r4 -+#define LDC_BYTE %r8 -+#define ALPHA %f0 -+#define ALPHA_VECT %v0 -+#define LOCAL_VAR1 %r9 -+#define LOCAL_VAR2 %r1 -+#define LOCAL_VAR3 %r11 -+#define A %r5 -+#define B %r6 -+#define CIJ %r7 -+#define CIJ_LOCAL %r12 -+#define ALIGN_4 .align 16 -+#define ALIGN_2 .align 8 -+#define PREFETCH_INS 1 -+ -+#include "kernelMacros.S" -+ -+/***********************************DGEMM***********************************************************/ -+ -+PROLOGUE -+ -+stmg %r6,%r12,40(%r15) -+lg CIJ, 160(%r15) -+lg LOCAL_VAR1, 168(%r15) -+srlg BN_CUR,BN,2 -+vrepg ALPHA_VECT,ALPHA_VECT,0 /*replicate alpha which in f0*/ -+sllg LDC_BYTE, LOCAL_VAR1,3 /*calculate lcd stride with bytes double=8 x<<3 */ -+cijle BN_CUR,0,.LX2 -+ -+ALIGN_4 -+.LX4_BN: -+#if defined(PREFETCH_INS) -+ pfd 1, 0(A) -+ pfd 1, 256(A) -+ pfd 1, 0(B) -+ pfd 1, 256(B) -+#endif -+srlg BM_CUR,BM,3 -+lgr LOCAL_VAR3,A -+lgr CIJ_LOCAL,CIJ -+cijle BM_CUR,0,.L4x4 -+ -+ALIGN_4 -+.L8x4_BM: /*BM_CUR LOOP */ -+ -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_8x4 -+cijle LOCAL_VAR1,0,.L8x4_mod -+ -+ALIGN_4 -+.L8x4_4_BK: /*BK_CUR LOOP */ -+#if defined(PREFETCH_INS) -+ pfd 1, 512(LOCAL_VAR3) -+#endif -+ CALC_8x4_4 LOCAL_VAR3,LOCAL_VAR2 -+#if defined(PREFETCH_INS) -+ pfd 1, 512(LOCAL_VAR2) -+#endif -+brctg LOCAL_VAR1,.L8x4_4_BK -+ -+ALIGN_4 -+.L8x4_mod: -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L8x4_BK_Store -+ -+ALIGN_4 -+.L8x4_BK: /*BK_CUR LOOP */ -+ CALC_8x4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L8x4_BK -+ -+ALIGN_4 -+.L8x4_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_8x4 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE -+ -+brctg BM_CUR,.L8x4_BM -+ -+ALIGN_4 -+.L4x4: -+ -+tmll BM,4 -+jz .L2x4 -+ -+ALIGN_4 -+.L4x4_BM: /*BM start*/ -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_4x4 -+cijle LOCAL_VAR1,0,.L4x4_mod -+ -+ALIGN_4 -+.L4x4_4_BK: /*BK_CUR LOOP */ -+ CALC_4x4_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x4_4_BK -+ -+ALIGN_4 -+.L4x4_mod: -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L4x4_BK_Store -+ -+ALIGN_4 -+.L4x4_BK: /*BK_CUR LOOP */ -+ CALC_4x4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x4_BK -+ -+ALIGN_4 -+.L4x4_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_4x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+ -+ALIGN_2 -+.L2x4: -+ -+tmll BM,2 -+jz .L1x4 -+ -+ALIGN_4 -+.L2x4_BM: /*BM start*/ -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_2x4 -+cijle LOCAL_VAR1,0,.L2x4_mod -+ -+ALIGN_4 -+.L2x4_4_BK: /*BK_CUR LOOP */ -+ CALC_2x4_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x4_4_BK -+ -+ALIGN_4 -+.L2x4_mod: -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L2x4_BK_Store -+ -+ALIGN_4 -+.L2x4_BK: /*BK_CUR LOOP */ -+ CALC_2x4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x4_BK -+ -+ALIGN_4 -+.L2x4_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_2x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+ -+ -+ALIGN_4 -+.L1x4: -+ -+tmll BM,1 -+jz .Lx4_INNER_END -+ -+ALIGN_4 -+.L1x4_BM: /*BM start*/ -+srlg LOCAL_VAR1,BK,2 
/*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_1x4 -+cijle LOCAL_VAR1,0,.L1x4_mod -+ -+ALIGN_4 -+.L1x4_4_BK: /*BK_CUR LOOP */ -+ CALC_1x4_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x4_4_BK -+ -+ALIGN_4 -+.L1x4_mod: -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L1x4_BK_Store -+ -+ALIGN_4 -+.L1x4_BK: /*BK_CUR LOOP */ -+ CALC_1x4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x4_BK -+ -+ALIGN_4 -+.L1x4_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_1x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+ -+ALIGN_2 -+.Lx4_INNER_END: -+ -+/*add LDC_BYTE_COPY to new*/ -+sllg LOCAL_VAR1,LDC_BYTE,2 /*multiply*4 */ -+sllg LOCAL_VAR2,BK,5 /*muyliply*4*sizeof(double) =multiply*32* 2**5 */ -+la CIJ,0(CIJ,LOCAL_VAR1) /*refresh CIJ=CIJ+LDC_BYTE*4*/ -+la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*4*sizeof(double) */ -+ -+brctg BN_CUR,.LX4_BN -+ -+/*********************************X2 SECTION************************************************/ -+ALIGN_4 -+.LX2: -+tmll BN,2 -+jz .Lx1 -+ -+ALIGN_4 -+.Lx2_BN: -+srlg BM_CUR,BM,3 -+lgr LOCAL_VAR3,A -+lgr CIJ_LOCAL,CIJ -+cijle BM_CUR,0,.L4x2 -+ -+ -+ALIGN_4 -+.L8x2_BM: /*BM_CUR LOOP */ -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_8x2 -+cijle LOCAL_VAR1,0,.L8x2_mod -+ -+ALIGN_4 -+.L8x2_4_BK: /*BK_CUR LOOP */ -+#if defined(PREFETCH_INS) -+ pfd 1, 256(LOCAL_VAR3) -+ pfd 1,64(LOCAL_VAR2) -+#endif -+ CALC_8x2_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L8x2_4_BK -+ -+ALIGN_4 -+.L8x2_mod: -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L8x2_BK_Store -+ -+ALIGN_4 -+.L8x2_BK: /*BK_CUR LOOP */ -+ CALC_8x2 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L8x2_BK -+ -+ALIGN_4 -+.L8x2_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_8x2 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE -+ -+ALIGN_4 -+brctg BM_CUR,.L8x2_BM -+ -+ALIGN_2 -+.L4x2: -+ -+tmll BM,4 -+jz .L2x2 -+ -+ALIGN_4 -+.L4x2_BM: /*BM start*/ -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_4x2 -+cijle LOCAL_VAR1,0,.L4x2_mod -+ -+ALIGN_4 -+.L4x2_4_BK: /*BK_CUR LOOP */ -+ CALC_4x2_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x2_4_BK -+ -+ALIGN_4 -+.L4x2_mod: -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L4x2_BK_Store -+ -+ALIGN_4 -+.L4x2_BK: /*BK_CUR LOOP */ -+ CALC_4x2 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x2_BK -+ -+ALIGN_4 -+.L4x2_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_4x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+ -+ALIGN_2 -+.L2x2: -+ -+tmll BM,2 -+jz .L1x2 -+ -+ALIGN_4 -+.L2x2_BM: /*BM start*/ -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_2x2 -+cijle LOCAL_VAR1,0,.L2x2_mod -+ -+ALIGN_4 -+.L2x2_4_BK: /*BK_CUR LOOP */ -+ CALC_2x2_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x2_4_BK -+ -+ALIGN_4 -+.L2x2_mod: -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L2x2_BK_Store -+ -+ALIGN_4 -+.L2x2_BK: /*BK_CUR LOOP */ -+ CALC_2x2 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x2_BK -+ -+ALIGN_4 -+.L2x2_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_2x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+ -+ -+ALIGN_2 -+.L1x2: -+ -+tmll BM,1 -+jz .Lx2_INNER_END -+ -+ALIGN_4 -+.L1x2_BM: /*BM start*/ -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_1x2 -+cijle LOCAL_VAR1,0,.L1x2_mod -+ -+ALIGN_4 -+.L1x2_4_BK: /*BK_CUR LOOP */ -+ CALC_1x2_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x2_4_BK -+ -+ALIGN_4 -+.L1x2_mod: -+la 
LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L1x2_BK_Store -+ -+ALIGN_4 -+.L1x2_BK: /*BK_CUR LOOP */ -+ CALC_1x2 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x2_BK -+ -+ALIGN_4 -+.L1x2_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_1x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+ -+ALIGN_2 -+.Lx2_INNER_END: -+/*add LDC_BYTE_COPY to new*/ -+la LOCAL_VAR1,0(LDC_BYTE,LDC_BYTE) /*multiply*2 */ -+sllg LOCAL_VAR2,BK,4 /*muyliply*2*sizeof(double) =multiply*16* 2**4 */ -+la CIJ,0(CIJ,LOCAL_VAR1) /*refresh CIJ=CIJ+LDC_BYTE*4*/ -+la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*4*sizeof(double) */ -+ -+ -+ -+ -+/*********************************X1 SECTION************************************************/ -+ALIGN_2 -+.Lx1: -+tmll BN,1 -+jz .L_FUNC_END -+ -+ALIGN_4 -+.Lx1_BN: -+srlg BM_CUR,BM,3 -+lgr LOCAL_VAR3,A -+lgr CIJ_LOCAL,CIJ -+cijle BM_CUR,0,.L4x1 -+ -+ -+ALIGN_4 -+.L8x1_BM: /*BM_CUR LOOP */ -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_8x1 -+cijle LOCAL_VAR1,0,.L8x1_mod -+ -+ALIGN_4 -+.L8x1_4_BK: /*BK_CUR LOOP */ -+#if defined(PREFETCH_INS) -+ pfd 1, 256(LOCAL_VAR3) -+#endif -+ CALC_8x1_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L8x1_4_BK -+ -+ALIGN_4 -+.L8x1_mod: -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L8x1_BK_Store -+ -+ALIGN_4 -+.L8x1_BK: /*BK_CUR LOOP */ -+ CALC_8x1 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L8x1_BK -+ -+ALIGN_4 -+.L8x1_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_8x1 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE -+ -+ALIGN_4 -+brctg BM_CUR,.L8x1_BM -+ -+ALIGN_2 -+.L4x1: -+ -+tmll BM,4 -+jz .L2x1 -+ -+ALIGN_4 -+.L4x1_BM: /*BM start*/ -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_4x1 -+cijle LOCAL_VAR1,0,.L4x1_mod -+ -+ALIGN_4 -+.L4x1_4_BK: /*BK_CUR LOOP */ -+ CALC_4x1_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x1_4_BK -+ -+ALIGN_4 -+.L4x1_mod: -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L4x1_BK_Store -+ -+ALIGN_4 -+.L4x1_BK: /*BK_CUR LOOP */ -+ CALC_4x1 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x1_BK -+ -+ALIGN_4 -+.L4x1_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_4x1 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+ -+ALIGN_2 -+.L2x1: -+ -+tmll BM,2 -+jz .L1x1 -+ -+ALIGN_4 -+.L2x1_BM: /*BM start*/ -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_2x1 -+cijle LOCAL_VAR1,0,.L2x1_mod -+ -+ALIGN_4 -+.L2x1_4_BK: /*BK_CUR LOOP */ -+ CALC_2x1_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x1_4_BK -+ -+ALIGN_4 -+.L2x1_mod: -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L2x1_BK_Store -+ -+ALIGN_4 -+.L2x1_BK: /*BK_CUR LOOP */ -+ CALC_2x1 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x1_BK -+ -+ALIGN_4 -+.L2x1_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_2x1 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+ -+ -+ALIGN_2 -+.L1x1: -+ -+tmll BM, 1 -+jz .Lx1_INNER_END -+ -+ALIGN_4 -+.L1x1_BM: /*BM start*/ -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+ZERO_CVEC_1x1 -+cijle LOCAL_VAR1,0,.L1x1_mod -+ -+ALIGN_4 -+.L1x1_4_BK: /*BK_CUR LOOP */ -+ CALC_1x1_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x1_4_BK -+ -+ALIGN_4 -+.L1x1_mod: -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+jz .L1x1_BK_Store -+ -+ALIGN_4 -+.L1x1_BK: /*BK_CUR LOOP */ -+ CALC_1x1 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x1_BK -+ -+ALIGN_4 -+.L1x1_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ 
-+STORE_1x1 ALPHA ,CIJ_LOCAL, LDC_BYTE -+ -+ALIGN_2 -+.Lx1_INNER_END: -+/*add LDC_BYTE_COPY to new*/ -+sllg LOCAL_VAR2,BK,3 /*muyliply*2*sizeof(double) =multiply*8* 2**3 */ -+la CIJ,0(CIJ,LDC_BYTE) /*refresh CIJ=CIJ+LDC_BYTE */ -+la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*1*sizeof(double) */ -+ -+ -+ALIGN_2 -+.L_FUNC_END: -+/*end*/ -+lmg %r6,%r12,40(%r15) -+br %r14 -+.end -+ -+ -+ -+ -diff --git a/kernel/zarch/kernelMacros.S b/kernel/zarch/kernelMacros.S -new file mode 100644 -index 00000000..cac4cb3d ---- /dev/null -+++ b/kernel/zarch/kernelMacros.S -@@ -0,0 +1,1529 @@ -+/*********************************KERNEL 8x4***********************************************/ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_8x4 -+ vzero %v16 -+ vzero %v17 -+ vzero %v18 -+ vzero %v19 -+ vzero %v20 -+ vzero %v21 -+ vzero %v22 -+ vzero %v23 -+ vzero %v24 -+ vzero %v25 -+ vzero %v26 -+ vzero %v27 -+ vzero %v28 -+ vzero %v29 -+ vzero %v30 -+ vzero %v31 -+.endm -+ -+/*Calculate for 8x4 C blocks*/ -+.macro CALC_8x4 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vlrepg %v1,8(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ vl %v4, 32(\PTR_A_REG) -+ vl %v5, 48(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vlrepg %v7,16(\PTR_B_REG) -+ vfmadb %v22,%v4,%v1,%v22 -+ vfmadb %v23,%v5,%v1,%v23 -+ vlrepg %v1,24(\PTR_B_REG) -+ vfmadb %v24,%v2,%v7,%v24 -+ vfmadb %v25,%v3,%v7,%v25 -+ vfmadb %v26,%v4,%v7,%v26 -+ la \PTR_A_REG, 64(\PTR_A_REG) -+ vfmadb %v27,%v5,%v7,%v27 -+ vfmadb %v28,%v2,%v1,%v28 -+ vfmadb %v29,%v3,%v1,%v29 -+ la \PTR_B_REG, 32(\PTR_B_REG) -+ vfmadb %v30,%v4,%v1,%v30 -+ vfmadb %v31,%v5,%v1,%v31 -+.endm -+ -+/*Calculate for 8x4_4 C blocks*/ -+.macro CALC_8x4_4 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vlrepg %v1,8(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ vl %v4, 32(\PTR_A_REG) -+ vl %v5, 48(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vlrepg %v7,16(\PTR_B_REG) -+ vfmadb %v22,%v4,%v1,%v22 -+ vfmadb %v23,%v5,%v1,%v23 -+ vlrepg %v1,24(\PTR_B_REG) -+ vfmadb %v24,%v2,%v7,%v24 -+ vfmadb %v25,%v3,%v7,%v25 -+ vfmadb %v26,%v4,%v7,%v26 -+ vfmadb %v27,%v5,%v7,%v27 -+ vfmadb %v28,%v2,%v1,%v28 -+ vfmadb %v29,%v3,%v1,%v29 -+ vfmadb %v30,%v4,%v1,%v30 -+ vfmadb %v31,%v5,%v1,%v31 -+ -+ vlrepg %v7, 32(\PTR_B_REG) -+ vlrepg %v1,40(\PTR_B_REG) -+ vl %v2, 64(\PTR_A_REG) -+ vl %v3, 80(\PTR_A_REG) -+ vl %v4, 96(\PTR_A_REG) -+ vl %v5, 112(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vlrepg %v7,48(\PTR_B_REG) -+ vfmadb %v22,%v4,%v1,%v22 -+ vfmadb %v23,%v5,%v1,%v23 -+ vlrepg %v1,56(\PTR_B_REG) -+ vfmadb %v24,%v2,%v7,%v24 -+ vfmadb %v25,%v3,%v7,%v25 -+ vfmadb %v26,%v4,%v7,%v26 -+ vfmadb %v27,%v5,%v7,%v27 -+ vfmadb %v28,%v2,%v1,%v28 -+ vfmadb %v29,%v3,%v1,%v29 -+ vfmadb %v30,%v4,%v1,%v30 -+ vfmadb %v31,%v5,%v1,%v31 -+ -+ vlrepg %v7, 64(\PTR_B_REG) -+ vlrepg %v1,72(\PTR_B_REG) -+ vl %v2, 128(\PTR_A_REG) -+ vl %v3, 144(\PTR_A_REG) -+ vl %v4, 160(\PTR_A_REG) -+ vl %v5, 176(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vlrepg %v7,80(\PTR_B_REG) -+ 
vfmadb %v22,%v4,%v1,%v22 -+ vfmadb %v23,%v5,%v1,%v23 -+ vlrepg %v1,88(\PTR_B_REG) -+ vfmadb %v24,%v2,%v7,%v24 -+ vfmadb %v25,%v3,%v7,%v25 -+ vfmadb %v26,%v4,%v7,%v26 -+ vfmadb %v27,%v5,%v7,%v27 -+ vfmadb %v28,%v2,%v1,%v28 -+ vfmadb %v29,%v3,%v1,%v29 -+ vfmadb %v30,%v4,%v1,%v30 -+ vfmadb %v31,%v5,%v1,%v31 -+ -+ vlrepg %v7, 96(\PTR_B_REG) -+ vlrepg %v1,104(\PTR_B_REG) -+ vl %v2, 192(\PTR_A_REG) -+ vl %v3, 208(\PTR_A_REG) -+ vl %v4, 224(\PTR_A_REG) -+ vl %v5, 240(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vlrepg %v7,112(\PTR_B_REG) -+ vfmadb %v22,%v4,%v1,%v22 -+ vfmadb %v23,%v5,%v1,%v23 -+ vlrepg %v1,120(\PTR_B_REG) -+ vfmadb %v24,%v2,%v7,%v24 -+ vfmadb %v25,%v3,%v7,%v25 -+ vfmadb %v26,%v4,%v7,%v26 -+ vfmadb %v27,%v5,%v7,%v27 -+ la \PTR_B_REG, 128(\PTR_B_REG) -+ vfmadb %v28,%v2,%v1,%v28 -+ vfmadb %v29,%v3,%v1,%v29 -+ vfmadb %v30,%v4,%v1,%v30 -+ la \PTR_A_REG, 256(\PTR_A_REG) -+ vfmadb %v31,%v5,%v1,%v31 -+ -+.endm -+ -+ -+/*STORE C8X4*/ -+.macro STORE_8x4 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ -+ /*add LDC_BYTE_reg=LDC_BYTE_original<<1 */ -+ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) -+ vl %v1,0(\CIJ_REG) -+ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG) -+ -+ vl %v2,16(\CIJ_REG) -+ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG) -+ -+ vl %v3,32(\CIJ_REG) -+ vfmadb %v3,%v18,\ALPHA_VECREG,%v3 -+ vst %v3,32(\CIJ_REG) -+ -+ vl %v4,48(\CIJ_REG) -+ vfmadb %v4,%v19,\ALPHA_VECREG,%v4 -+ vst %v4,48(\CIJ_REG) -+ -+ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) -+ -+ -+ /*add c LDC_BYTE*/ -+ vl %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v1,%v20,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ vl %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v2,%v21,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ -+ vl %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v3,%v22,\ALPHA_VECREG,%v3 -+ vst %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ vl %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v4,%v23,\ALPHA_VECREG,%v4 -+ vst %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ -+ vl %v1,0(\CIJ_REG,LOCAL_VAR1) -+ vfmadb %v1,%v24,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG,LOCAL_VAR1) -+ -+ vl %v2,16(\CIJ_REG,LOCAL_VAR1) -+ vfmadb %v2,%v25,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG,LOCAL_VAR1) -+ -+ vl %v3,32(\CIJ_REG,LOCAL_VAR1) -+ vfmadb %v3,%v26,\ALPHA_VECREG,%v3 -+ vst %v3,32(\CIJ_REG,LOCAL_VAR1) -+ -+ vl %v4,48(\CIJ_REG,LOCAL_VAR1) -+ vfmadb %v4,%v27,\ALPHA_VECREG,%v4 -+ vst %v4,48(\CIJ_REG,LOCAL_VAR1) -+ -+ -+ vl %v1,0(\CIJ_REG,LOCAL_VAR2) -+ vfmadb %v1,%v28,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG,LOCAL_VAR2) -+ -+ vl %v2,16(\CIJ_REG,LOCAL_VAR2) -+ vfmadb %v2,%v29,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG,LOCAL_VAR2) -+ -+ vl %v3,32(\CIJ_REG,LOCAL_VAR2) -+ vfmadb %v3,%v30,\ALPHA_VECREG,%v3 -+ vst %v3,32(\CIJ_REG,LOCAL_VAR2) -+ -+ vl %v4,48(\CIJ_REG,LOCAL_VAR2) -+ vfmadb %v4,%v31,\ALPHA_VECREG,%v4 -+ vst %v4,48(\CIJ_REG,LOCAL_VAR2) -+ -+ la \CIJ_REG,64(\CIJ_REG) -+ -+.endm -+ -+/*STORE TRMM C8X4*/ -+.macro STORE_TRMM_8x4 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ -+ /*add LDC_BYTE_reg=LDC_BYTE_original<<1 */ -+ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) -+ vfmdb %v1,%v16,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG) -+ -+ vfmdb %v2,%v17,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG) -+ vfmdb %v3,%v18,\ALPHA_VECREG -+ vst %v3,32(\CIJ_REG) -+ vfmdb %v4,%v19,\ALPHA_VECREG -+ vst %v4,48(\CIJ_REG) -+ -+ la 
LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) -+ -+ /*add c LDC_BYTE*/ -+ vfmdb %v1,%v20,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmdb %v2,%v21,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ vfmdb %v3,%v22,\ALPHA_VECREG -+ vst %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmdb %v4,%v23,\ALPHA_VECREG -+ vst %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ vfmdb %v1,%v24,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG,LOCAL_VAR1) -+ vfmdb %v2,%v25,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG,LOCAL_VAR1) -+ vfmdb %v3,%v26,\ALPHA_VECREG -+ vst %v3,32(\CIJ_REG,LOCAL_VAR1) -+ vfmdb %v4,%v27,\ALPHA_VECREG -+ vst %v4,48(\CIJ_REG,LOCAL_VAR1) -+ -+ vfmdb %v1,%v28,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG,LOCAL_VAR2) -+ vfmdb %v2,%v29,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG,LOCAL_VAR2) -+ vfmdb %v3,%v30,\ALPHA_VECREG -+ vst %v3,32(\CIJ_REG,LOCAL_VAR2) -+ vfmdb %v4,%v31,\ALPHA_VECREG -+ vst %v4,48(\CIJ_REG,LOCAL_VAR2) -+ la \CIJ_REG,64(\CIJ_REG) -+ -+.endm -+/**************************************Kernel4x4*************************************************/ -+ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_4x4 -+ vzero %v16 -+ vzero %v17 -+ vzero %v20 -+ vzero %v21 -+ vzero %v24 -+ vzero %v25 -+ vzero %v28 -+ vzero %v29 -+.endm -+ -+/*Calculate for 4x4 C blocks*/ -+.macro CALC_4x4 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vlrepg %v1,8(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vlrepg %v7,16(\PTR_B_REG) -+ vlrepg %v1,24(\PTR_B_REG) -+ vfmadb %v24,%v2,%v7,%v24 -+ vfmadb %v25,%v3,%v7,%v25 -+ la \PTR_A_REG, 32(\PTR_A_REG) -+ vfmadb %v28,%v2,%v1,%v28 -+ vfmadb %v29,%v3,%v1,%v29 -+ la \PTR_B_REG, 32(\PTR_B_REG) -+.endm -+ -+.macro CALC_4x4_4 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vlrepg %v1,8(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vlrepg %v7,16(\PTR_B_REG) -+ vlrepg %v1,24(\PTR_B_REG) -+ vfmadb %v24,%v2,%v7,%v24 -+ vfmadb %v25,%v3,%v7,%v25 -+ vfmadb %v28,%v2,%v1,%v28 -+ vfmadb %v29,%v3,%v1,%v29 -+ -+ vlrepg %v7, 32(\PTR_B_REG) -+ vlrepg %v1,40(\PTR_B_REG) -+ vl %v2, 32(\PTR_A_REG) -+ vl %v3, 48(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vlrepg %v7,48(\PTR_B_REG) -+ vlrepg %v1,56(\PTR_B_REG) -+ vfmadb %v24,%v2,%v7,%v24 -+ vfmadb %v25,%v3,%v7,%v25 -+ vfmadb %v28,%v2,%v1,%v28 -+ vfmadb %v29,%v3,%v1,%v29 -+ -+ vlrepg %v7, 64(\PTR_B_REG) -+ vlrepg %v1,72(\PTR_B_REG) -+ vl %v2, 64(\PTR_A_REG) -+ vl %v3, 80(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vlrepg %v7,80(\PTR_B_REG) -+ vlrepg %v1,88(\PTR_B_REG) -+ vfmadb %v24,%v2,%v7,%v24 -+ vfmadb %v25,%v3,%v7,%v25 -+ vfmadb %v28,%v2,%v1,%v28 -+ vfmadb %v29,%v3,%v1,%v29 -+ -+ vlrepg %v7, 96(\PTR_B_REG) -+ vlrepg %v1,104(\PTR_B_REG) -+ vl %v2, 96(\PTR_A_REG) -+ vl %v3, 112(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vlrepg %v7,112(\PTR_B_REG) -+ la \PTR_A_REG, 128(\PTR_A_REG) -+ vlrepg %v1,120(\PTR_B_REG) -+ vfmadb %v24,%v2,%v7,%v24 -+ vfmadb %v25,%v3,%v7,%v25 -+ vfmadb %v28,%v2,%v1,%v28 -+ la \PTR_B_REG, 128(\PTR_B_REG) -+ vfmadb %v29,%v3,%v1,%v29 -+.endm -+ -+/*STORE C4X4*/ -+.macro STORE_4x4 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ -+ 
/*add LDC_BYTE_reg=LDC_BYTE_original<<1 */ -+ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) -+ vl %v1,0(\CIJ_REG) -+ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG) -+ -+ vl %v2,16(\CIJ_REG) -+ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG) -+ -+ -+ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) -+ -+ /*add c LDC_BYTE*/ -+ vl %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v1,%v20,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ vl %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v2,%v21,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ vl %v1,0(\CIJ_REG,LOCAL_VAR1) -+ vfmadb %v1,%v24,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG,LOCAL_VAR1) -+ -+ vl %v2,16(\CIJ_REG,LOCAL_VAR1) -+ vfmadb %v2,%v25,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG,LOCAL_VAR1) -+ -+ -+ vl %v1,0(\CIJ_REG,LOCAL_VAR2) -+ vfmadb %v1,%v28,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG,LOCAL_VAR2) -+ -+ vl %v2,16(\CIJ_REG,LOCAL_VAR2) -+ vfmadb %v2,%v29,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG,LOCAL_VAR2) -+ -+ la \CIJ_REG,32(\CIJ_REG) -+.endm -+ -+/*STORE TRMM C4X4*/ -+.macro STORE_TRMM_4x4 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ /*add LDC_BYTE_reg=LDC_BYTE_original<<1 */ -+ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) -+ vfmdb %v1,%v16,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG) -+ vfmdb %v2,%v17,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG) -+ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) -+ vfmdb %v1,%v20,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmdb %v2,%v21,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmdb %v1,%v24,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG,LOCAL_VAR1) -+ vfmdb %v2,%v25,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG,LOCAL_VAR1) -+ vfmdb %v1,%v28,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG,LOCAL_VAR2) -+ vfmdb %v2,%v29,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG,LOCAL_VAR2) -+ la \CIJ_REG,32(\CIJ_REG) -+.endm -+/**************************************Kernel2x4*************************************************/ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_2x4 -+ vzero %v1 /*a1b1 a1b2 */ -+ vzero %v2 /*a1b3 a1b4 */ -+ vzero %v6 /*a2b1 a2b2 */ -+ vzero %v7 /*a2b3 a2b4 */ -+.endm -+ -+/*Calculate for 2x4_4 C blocks.This Time BroadCast A. but Load B multiple*/ -+.macro CALC_2x4_4 PTR_A_REG,PTR_B_REG -+ vl %v4, 0(\PTR_B_REG) -+ vl %v5,16(\PTR_B_REG) -+ vlrepg %v3, 0(\PTR_A_REG) -+ vlrepg %v16, 8(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ vfmadb %v2,%v3,%v5,%v2 -+ vfmadb %v6,%v16,%v4,%v6 -+ vfmadb %v7,%v16,%v5,%v7 -+ -+ vl %v4, 32(\PTR_B_REG) -+ vl %v5,48(\PTR_B_REG) -+ vlrepg %v3, 16(\PTR_A_REG) -+ vlrepg %v16, 24(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ vfmadb %v2,%v3,%v5,%v2 -+ vfmadb %v6,%v16,%v4,%v6 -+ vfmadb %v7,%v16,%v5,%v7 -+ -+ vl %v4, 64(\PTR_B_REG) -+ vl %v5,80(\PTR_B_REG) -+ vlrepg %v3, 32(\PTR_A_REG) -+ vlrepg %v16, 40(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ vfmadb %v2,%v3,%v5,%v2 -+ vfmadb %v6,%v16,%v4,%v6 -+ vfmadb %v7,%v16,%v5,%v7 -+ -+ vl %v4, 96(\PTR_B_REG) -+ vl %v5,112(\PTR_B_REG) -+ vlrepg %v3, 48(\PTR_A_REG) -+ vlrepg %v16, 56(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ vfmadb %v2,%v3,%v5,%v2 -+ la \PTR_B_REG, 128(\PTR_B_REG) -+ vfmadb %v6,%v16,%v4,%v6 -+ vfmadb %v7,%v16,%v5,%v7 -+ la \PTR_A_REG, 64(\PTR_A_REG) -+.endm -+ -+/*Calculate for 2x4 C blocks.This Time BroadCast A. 
but Load B multiple*/ -+.macro CALC_2x4 PTR_A_REG,PTR_B_REG -+ vl %v4, 0(\PTR_B_REG) -+ vl %v5,16(\PTR_B_REG) -+ vlrepg %v3, 0(\PTR_A_REG) -+ vlrepg %v16, 8(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ vfmadb %v2,%v3,%v5,%v2 -+ la \PTR_A_REG, 16(\PTR_A_REG) -+ vfmadb %v6,%v16,%v4,%v6 -+ vfmadb %v7,%v16,%v5,%v7 -+ la \PTR_B_REG, 32(\PTR_B_REG) -+.endm -+ -+.macro STORE_2x4 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL -+/**/ -+ vfmdb %v1,%v1,\ALPHA_REG -+ vfmdb %v2,%v2,\ALPHA_REG -+ vfmdb %v6,%v6,\ALPHA_REG -+ vfmdb %v7,%v7,\ALPHA_REG -+ vrepg %v4,%v1,1 -+ vrepg %v5,%v6,1 -+ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) -+ adb %f1, 0(\CIJ_REG) -+ std %f1,0(\CIJ_REG) -+ -+ adb %f6, 8(\CIJ_REG) -+ std %f6,8(\CIJ_REG) -+ -+ adb %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ adb %f5,8(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ std %f5,8(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ /*add LDC_BYTE */ -+ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) -+ vrepg %v4,%v2,1 -+ vrepg %v5,%v7,1 -+ -+ adb %f2,0(\CIJ_REG,LOCAL_VAR1) -+ std %f2,0(\CIJ_REG,LOCAL_VAR1) -+ -+ adb %f7,8(\CIJ_REG,LOCAL_VAR1) -+ std %f7,8(\CIJ_REG,LOCAL_VAR1) -+ -+ adb %f4,0(\CIJ_REG,LOCAL_VAR2) -+ std %f4,0(\CIJ_REG,LOCAL_VAR2) -+ -+ adb %f5,8(\CIJ_REG,LOCAL_VAR2) -+ std %f5,8(\CIJ_REG,LOCAL_VAR2) -+ la \CIJ_REG,16(\CIJ_REG) -+ -+.endm -+ -+.macro STORE_TRMM_2x4 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL -+/**/ -+ vfmdb %v1,%v1,\ALPHA_REG -+ vfmdb %v2,%v2,\ALPHA_REG -+ vfmdb %v6,%v6,\ALPHA_REG -+ vfmdb %v7,%v7,\ALPHA_REG -+ vrepg %v4,%v1,1 -+ vrepg %v5,%v6,1 -+ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) -+ std %f1,0(\CIJ_REG) -+ std %f6,8(\CIJ_REG) -+ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ std %f5,8(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ /*add LDC_BYTE */ -+ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) -+ vrepg %v4,%v2,1 -+ vrepg %v5,%v7,1 -+ std %f2,0(\CIJ_REG,LOCAL_VAR1) -+ std %f7,8(\CIJ_REG,LOCAL_VAR1) -+ std %f4,0(\CIJ_REG,LOCAL_VAR2) -+ std %f5,8(\CIJ_REG,LOCAL_VAR2) -+ la \CIJ_REG,16(\CIJ_REG) -+.endm -+ -+/**************************************Kernel1x4*************************************************/ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_1x4 -+ vzero %v1 -+ vzero %v2 -+.endm -+/*Calculate for 1x4 C blocks.This Time BroadCast A. but Load B multiple*/ -+.macro CALC_1x4 PTR_A_REG,PTR_B_REG -+ vl %v4, 0(\PTR_B_REG) -+ vl %v5,16(\PTR_B_REG) -+ vlrepg %v3, 0(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ la \PTR_A_REG, 8(\PTR_A_REG) -+ vfmadb %v2,%v3,%v5,%v2 -+ la \PTR_B_REG, 32(\PTR_B_REG) -+.endm -+ -+/*Calculate for 1x4_4 C blocks.This Time BroadCast A. 
but Load B multiple*/ -+.macro CALC_1x4_4 PTR_A_REG,PTR_B_REG -+ vl %v4, 0(\PTR_B_REG) -+ vl %v5,16(\PTR_B_REG) -+ vlrepg %v3, 0(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ vfmadb %v2,%v3,%v5,%v2 -+ -+ vl %v4, 32(\PTR_B_REG) -+ vl %v5,48(\PTR_B_REG) -+ vlrepg %v3, 8(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ vfmadb %v2,%v3,%v5,%v2 -+ -+ vl %v4, 64(\PTR_B_REG) -+ vl %v5,80(\PTR_B_REG) -+ vlrepg %v3, 16(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ vfmadb %v2,%v3,%v5,%v2 -+ -+ vl %v4, 96(\PTR_B_REG) -+ vl %v5,112(\PTR_B_REG) -+ vlrepg %v3, 24(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ vfmadb %v2,%v3,%v5,%v2 -+ la \PTR_A_REG, 32(\PTR_A_REG) -+ la \PTR_B_REG, 128(\PTR_B_REG) -+.endm -+ -+.macro STORE_1x4 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL -+/**/ -+ vfmdb %v1,%v1,\ALPHA_REG -+ vfmdb %v2,%v2,\ALPHA_REG -+ vrepg %v4,%v1,1 -+ vrepg %v5,%v2,1 -+ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) -+ adb %f1, 0(\CIJ_REG) -+ std %f1,0(\CIJ_REG) -+ -+ adb %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ /*add LDC_BYTE */ -+ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) -+ adb %f2,0(\CIJ_REG,LOCAL_VAR1) -+ std %f2,0(\CIJ_REG,LOCAL_VAR1) -+ adb %f5,0(\CIJ_REG,LOCAL_VAR2) -+ std %f5,0(\CIJ_REG,LOCAL_VAR2) -+ la \CIJ_REG,8(\CIJ_REG) -+ -+.endm -+ -+.macro STORE_TRMM_1x4 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL -+/**/ -+ vfmdb %v1,%v1,\ALPHA_REG -+ vfmdb %v2,%v2,\ALPHA_REG -+ vrepg %v4,%v1,1 -+ vrepg %v5,%v2,1 -+ la LOCAL_VAR1,0(\LDC_BYTE_ORIGINAL, \LDC_BYTE_ORIGINAL) -+ std %f1,0(\CIJ_REG) -+ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ /*add LDC_BYTE */ -+ la LOCAL_VAR2,0(LOCAL_VAR1,\LDC_BYTE_ORIGINAL ) -+ std %f2,0(\CIJ_REG,LOCAL_VAR1) -+ std %f5,0(\CIJ_REG,LOCAL_VAR2) -+ la \CIJ_REG,8(\CIJ_REG) -+.endm -+/***************************************BN=2 SECTION***************************************/ -+/*************************************Kernel8x2***************************************************/ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_8x2 -+ vzero %v16 -+ vzero %v17 -+ vzero %v18 -+ vzero %v19 -+ vzero %v20 -+ vzero %v21 -+ vzero %v22 -+ vzero %v23 -+ -+.endm -+ -+/*Calculate for 8x2 C blocks*/ -+.macro CALC_8x2 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vlrepg %v1,8(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ vl %v4, 32(\PTR_A_REG) -+ vl %v5, 48(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ la \PTR_A_REG, 64(\PTR_A_REG) -+ vfmadb %v22,%v4,%v1,%v22 -+ vfmadb %v23,%v5,%v1,%v23 -+ la \PTR_B_REG, 16(\PTR_B_REG) -+.endm -+ -+ -+/*Calculate for 8x2_4 C blocks*/ -+.macro CALC_8x2_4 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vlrepg %v1,8(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ vl %v4, 32(\PTR_A_REG) -+ vl %v5, 48(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vfmadb %v22,%v4,%v1,%v22 -+ vfmadb %v23,%v5,%v1,%v23 -+ -+ vlrepg %v7, 16(\PTR_B_REG) -+ vlrepg %v1,24(\PTR_B_REG) -+ vl %v2, 64(\PTR_A_REG) -+ vl %v3, 80(\PTR_A_REG) -+ vl %v4, 96(\PTR_A_REG) -+ vl %v5, 112(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vfmadb %v22,%v4,%v1,%v22 -+ vfmadb %v23,%v5,%v1,%v23 -+ -+ vlrepg %v7, 32(\PTR_B_REG) -+ vlrepg 
%v1,40(\PTR_B_REG) -+ vl %v2, 128(\PTR_A_REG) -+ vl %v3, 144(\PTR_A_REG) -+ vl %v4, 160(\PTR_A_REG) -+ vl %v5, 176(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vfmadb %v22,%v4,%v1,%v22 -+ vfmadb %v23,%v5,%v1,%v23 -+ -+ vlrepg %v7, 48(\PTR_B_REG) -+ vlrepg %v1,56(\PTR_B_REG) -+ vl %v2, 192(\PTR_A_REG) -+ vl %v3, 208(\PTR_A_REG) -+ vl %v4, 224(\PTR_A_REG) -+ vl %v5, 240(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ la \PTR_B_REG, 64(\PTR_B_REG) -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ vfmadb %v22,%v4,%v1,%v22 -+ vfmadb %v23,%v5,%v1,%v23 -+ la \PTR_A_REG, 256(\PTR_A_REG) -+.endm -+ -+/*STORE C8X2*/ -+.macro STORE_8x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ -+ vl %v1,0(\CIJ_REG) -+ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG) -+ -+ vl %v2,16(\CIJ_REG) -+ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG) -+ -+ vl %v3,32(\CIJ_REG) -+ vfmadb %v3,%v18,\ALPHA_VECREG,%v3 -+ vst %v3,32(\CIJ_REG) -+ -+ vl %v4,48(\CIJ_REG) -+ vfmadb %v4,%v19,\ALPHA_VECREG,%v4 -+ vst %v4,48(\CIJ_REG) -+ -+ -+ vl %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v1,%v20,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ vl %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v2,%v21,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ -+ vl %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v3,%v22,\ALPHA_VECREG,%v3 -+ vst %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ vl %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v4,%v23,\ALPHA_VECREG,%v4 -+ vst %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ -+ la \CIJ_REG,64(\CIJ_REG) -+ -+.endm -+ -+/*STORE TRMM C8X2*/ -+.macro STORE_TRMM_8x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ vfmdb %v1,%v16,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG) -+ vfmdb %v2,%v17,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG) -+ vfmdb %v3,%v18,\ALPHA_VECREG -+ vst %v3,32(\CIJ_REG) -+ vfmdb %v4,%v19,\ALPHA_VECREG -+ vst %v4,48(\CIJ_REG) -+ vfmdb %v1,%v20,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmdb %v2,%v21,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmdb %v3,%v22,\ALPHA_VECREG -+ vst %v3,32(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmdb %v4,%v23,\ALPHA_VECREG -+ vst %v4,48(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ la \CIJ_REG,64(\CIJ_REG) -+.endm -+ -+/*************************************Kernel4x2***************************************************/ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_4x2 -+ vzero %v16 -+ vzero %v17 -+ vzero %v20 -+ vzero %v21 -+ -+.endm -+ -+/*Calculate for 4x2 C blocks*/ -+.macro CALC_4x2 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vlrepg %v1,8(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ la \PTR_A_REG, 32(\PTR_A_REG) -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ la \PTR_B_REG, 16(\PTR_B_REG) -+.endm -+ -+/*Calculate for 4x2_4 C blocks*/ -+.macro CALC_4x2_4 PTR_A_REG,PTR_B_REG -+ -+ vlrepg %v7, 0(\PTR_B_REG) -+ vlrepg %v1,8(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ -+ vlrepg %v7, 16(\PTR_B_REG) -+ vlrepg %v1,24(\PTR_B_REG) -+ vl %v2, 32(\PTR_A_REG) -+ vl %v3, 48(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb 
%v21,%v3,%v1,%v21 -+ -+ vlrepg %v7, 32(\PTR_B_REG) -+ vlrepg %v1,40(\PTR_B_REG) -+ vl %v2, 64(\PTR_A_REG) -+ vl %v3, 80(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ -+ -+ vlrepg %v7, 48(\PTR_B_REG) -+ vlrepg %v1,56(\PTR_B_REG) -+ vl %v2, 96(\PTR_A_REG) -+ vl %v3, 112(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ la \PTR_B_REG, 64(\PTR_B_REG) -+ vfmadb %v20,%v2,%v1,%v20 -+ vfmadb %v21,%v3,%v1,%v21 -+ la \PTR_A_REG, 128(\PTR_A_REG) -+.endm -+ -+ -+/*STORE C4x2*/ -+.macro STORE_4x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ -+ vl %v1,0(\CIJ_REG) -+ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG) -+ -+ vl %v2,16(\CIJ_REG) -+ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG) -+ -+ -+ vl %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v1,%v20,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ vl %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v2,%v21,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ la \CIJ_REG,32(\CIJ_REG) -+ -+.endm -+ -+/*STORE TRMM C4x2*/ -+.macro STORE_TRMM_4x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ vfmdb %v1,%v16,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG) -+ vfmdb %v2,%v17,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG) -+ vfmdb %v1,%v20,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmdb %v2,%v21,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ la \CIJ_REG,32(\CIJ_REG) -+.endm -+ -+/*************************************Kernel2x2***************************************************/ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_2x2 -+ vzero %v16 -+ vzero %v20 -+ -+.endm -+ -+/*Calculate for 2x2 C blocks*/ -+.macro CALC_2x2 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vlrepg %v1,8(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ la \PTR_A_REG, 16(\PTR_A_REG) -+ vfmadb %v20,%v2,%v1,%v20 -+ la \PTR_B_REG, 16(\PTR_B_REG) -+.endm -+ -+/*Calculate for 2x2_4 C blocks*/ -+.macro CALC_2x2_4 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vlrepg %v1,8(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v20,%v2,%v1,%v20 -+ -+ vlrepg %v7, 16(\PTR_B_REG) -+ vlrepg %v1,24(\PTR_B_REG) -+ vl %v2, 16(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v20,%v2,%v1,%v20 -+ -+ vlrepg %v7, 32(\PTR_B_REG) -+ vlrepg %v1,40(\PTR_B_REG) -+ vl %v2, 32(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v20,%v2,%v1,%v20 -+ -+ -+ vlrepg %v7, 48(\PTR_B_REG) -+ vlrepg %v1,56(\PTR_B_REG) -+ vl %v2, 48(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v20,%v2,%v1,%v20 -+ -+ la \PTR_B_REG, 64(\PTR_B_REG) -+ la \PTR_A_REG, 64(\PTR_A_REG) -+.endm -+ -+/*STORE C2x2*/ -+.macro STORE_2x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ -+ vl %v1,0(\CIJ_REG) -+ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG) -+ -+ vl %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ vfmadb %v1,%v20,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ la \CIJ_REG,16(\CIJ_REG) -+ -+.endm -+ -+/*STORE TRMM C2x2*/ -+.macro STORE_TRMM_2x2 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ vfmdb %v1,%v16,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG) -+ vfmdb %v1,%v20,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ la \CIJ_REG,16(\CIJ_REG) -+.endm -+ -+/**************************************Kernel1x2*************************************************/ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_1x2 -+ vzero %v1 -+.endm -+/*Calculate for 1x2 C blocks.This Time BroadCast A. 
but Load B multiple*/ -+.macro CALC_1x2 PTR_A_REG,PTR_B_REG -+ vl %v4, 0(\PTR_B_REG) -+ vlrepg %v3, 0(\PTR_A_REG) -+ la \PTR_B_REG, 16(\PTR_B_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ la \PTR_A_REG, 8(\PTR_A_REG) -+.endm -+ -+.macro CALC_1x2_4 PTR_A_REG,PTR_B_REG -+ vl %v4, 0(\PTR_B_REG) -+ vlrepg %v3, 0(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ -+ vl %v4, 16(\PTR_B_REG) -+ vlrepg %v3, 8(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ -+ vl %v4, 32(\PTR_B_REG) -+ vlrepg %v3, 16(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ -+ vl %v4, 48(\PTR_B_REG) -+ vlrepg %v3, 24(\PTR_A_REG) -+ vfmadb %v1,%v3,%v4,%v1 -+ -+ la \PTR_B_REG, 64(\PTR_B_REG) -+ la \PTR_A_REG, 32(\PTR_A_REG) -+.endm -+ -+.macro STORE_1x2 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL -+/**/ -+ vfmdb %v1,%v1,\ALPHA_REG -+ vrepg %v4,%v1,1 -+ adb %f1, 0(\CIJ_REG) -+ std %f1,0(\CIJ_REG) -+ -+ adb %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ -+ la \CIJ_REG,8(\CIJ_REG) -+ -+.endm -+ -+.macro STORE_TRMM_1x2 ALPHA_REG,CIJ_REG , LDC_BYTE_ORIGINAL -+/**/ -+ vfmdb %v1,%v1,\ALPHA_REG -+ vrepg %v4,%v1,1 -+ std %f1,0(\CIJ_REG) -+ std %f4,0(\CIJ_REG,\LDC_BYTE_ORIGINAL) -+ la \CIJ_REG,8(\CIJ_REG) -+.endm -+ -+/**************************************BN=1*******************************************************/ -+/*************************************Kernel8x1***************************************************/ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_8x1 -+ vzero %v16 -+ vzero %v17 -+ vzero %v18 -+ vzero %v19 -+.endm -+/*Calculate for 8x1 C blocks*/ -+.macro CALC_8x1 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ vl %v4, 32(\PTR_A_REG) -+ vl %v5, 48(\PTR_A_REG) -+ la \PTR_B_REG, 8(\PTR_B_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ la \PTR_A_REG, 64(\PTR_A_REG) -+ vfmadb %v19,%v5,%v7,%v19 -+.endm -+ -+/*Calculate for 8x1_4 C blocks*/ -+.macro CALC_8x1_4 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ vl %v4, 32(\PTR_A_REG) -+ vl %v5, 48(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ -+ vlrepg %v7, 8(\PTR_B_REG) -+ vl %v2, 64(\PTR_A_REG) -+ vl %v3, 80(\PTR_A_REG) -+ vl %v4, 96(\PTR_A_REG) -+ vl %v5, 112(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ -+ vlrepg %v7, 16(\PTR_B_REG) -+ vl %v2, 128(\PTR_A_REG) -+ vl %v3, 144(\PTR_A_REG) -+ vl %v4, 160(\PTR_A_REG) -+ vl %v5, 176(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ -+ vlrepg %v7, 24(\PTR_B_REG) -+ vl %v2, 192(\PTR_A_REG) -+ vl %v3, 208(\PTR_A_REG) -+ vl %v4, 224(\PTR_A_REG) -+ vl %v5, 240(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ vfmadb %v18,%v4,%v7,%v18 -+ vfmadb %v19,%v5,%v7,%v19 -+ -+ -+ la \PTR_A_REG, 256(\PTR_A_REG) -+ la \PTR_B_REG, 32(\PTR_B_REG) -+.endm -+ -+/*STORE C8X1*/ -+.macro STORE_8x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ -+ vl %v1,0(\CIJ_REG) -+ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG) -+ -+ vl %v2,16(\CIJ_REG) -+ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG) -+ -+ vl %v3,32(\CIJ_REG) -+ vfmadb %v3,%v18,\ALPHA_VECREG,%v3 -+ vst %v3,32(\CIJ_REG) -+ -+ vl %v4,48(\CIJ_REG) -+ vfmadb %v4,%v19,\ALPHA_VECREG,%v4 -+ vst %v4,48(\CIJ_REG) -+ -+ la \CIJ_REG,64(\CIJ_REG) -+ -+.endm -+ -+/*STORE TRMM C8X1*/ -+.macro 
STORE_TRMM_8x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ vfmdb %v1,%v16,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG) -+ vfmdb %v2,%v17,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG) -+ vfmdb %v3,%v18,\ALPHA_VECREG -+ vst %v3,32(\CIJ_REG) -+ vfmdb %v4,%v19,\ALPHA_VECREG -+ vst %v4,48(\CIJ_REG) -+ la \CIJ_REG,64(\CIJ_REG) -+.endm -+ -+ -+/*************************************Kernel4x1***************************************************/ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_4x1 -+ vzero %v16 -+ vzero %v17 -+.endm -+/*Calculate for 4x1 C blocks*/ -+.macro CALC_4x1 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ la \PTR_B_REG, 8(\PTR_B_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ la \PTR_A_REG, 32(\PTR_A_REG) -+.endm -+ -+/*Calculate for 4x1_4 C blocks*/ -+.macro CALC_4x1_4 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vl %v3, 16(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ -+ vlrepg %v7, 8(\PTR_B_REG) -+ vl %v2, 32(\PTR_A_REG) -+ vl %v3, 48(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ -+ vlrepg %v7, 16(\PTR_B_REG) -+ vl %v2, 64(\PTR_A_REG) -+ vl %v3, 80(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ -+ vlrepg %v7, 24(\PTR_B_REG) -+ vl %v2, 96(\PTR_A_REG) -+ vl %v3, 112(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ vfmadb %v17,%v3,%v7,%v17 -+ -+ la \PTR_B_REG, 32(\PTR_B_REG) -+ la \PTR_A_REG, 128(\PTR_A_REG) -+.endm -+ -+/*STORE C4X1*/ -+.macro STORE_4x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ -+ vl %v1,0(\CIJ_REG) -+ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG) -+ -+ vl %v2,16(\CIJ_REG) -+ vfmadb %v2,%v17,\ALPHA_VECREG,%v2 -+ vst %v2,16(\CIJ_REG) -+ -+ -+ la \CIJ_REG,32(\CIJ_REG) -+ -+.endm -+ -+/*STORE TRMM C4X1*/ -+.macro STORE_TRMM_4x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ vfmdb %v1,%v16,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG) -+ vfmdb %v2,%v17,\ALPHA_VECREG -+ vst %v2,16(\CIJ_REG) -+ la \CIJ_REG,32(\CIJ_REG) -+.endm -+/*************************************Kernel2x1***************************************************/ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_2x1 -+ vzero %v16 -+.endm -+/*Calculate for 2x1 C blocks*/ -+.macro CALC_2x1 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ la \PTR_B_REG, 8(\PTR_B_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ la \PTR_A_REG, 16(\PTR_A_REG) -+.endm -+ -+/*Calculate for 2x1_4 C blocks*/ -+.macro CALC_2x1_4 PTR_A_REG,PTR_B_REG -+ vlrepg %v7, 0(\PTR_B_REG) -+ vl %v2, 0(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ -+ vlrepg %v7, 8(\PTR_B_REG) -+ vl %v2, 16(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ -+ vlrepg %v7, 16(\PTR_B_REG) -+ vl %v2, 32(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ -+ vlrepg %v7, 24(\PTR_B_REG) -+ vl %v2, 48(\PTR_A_REG) -+ vfmadb %v16,%v2,%v7,%v16 -+ -+ la \PTR_B_REG, 32(\PTR_B_REG) -+ la \PTR_A_REG, 64(\PTR_A_REG) -+.endm -+ -+/*STORE C2X1*/ -+.macro STORE_2x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ -+ vl %v1,0(\CIJ_REG) -+ vfmadb %v1,%v16,\ALPHA_VECREG,%v1 -+ vst %v1,0(\CIJ_REG) -+ -+ la \CIJ_REG,16(\CIJ_REG) -+ -+.endm -+ -+/*STORE TRMM C2X1*/ -+.macro STORE_TRMM_2x1 ALPHA_VECREG,CIJ_REG , LDC_BYTE_ORIGINAL -+ vfmdb %v1,%v16,\ALPHA_VECREG -+ vst %v1,0(\CIJ_REG) -+ la \CIJ_REG,16(\CIJ_REG) -+.endm -+/*************************************Kernel1x1***************************************************/ -+/*Zero C block Vectors*/ -+.macro ZERO_CVEC_1x1 -+ LZDR %f1 -+.endm -+/*Calculate for 1x1 C blocks*/ -+.macro CALC_1x1 
PTR_A_REG,PTR_B_REG -+ ld %f2,0(\PTR_A_REG) /**a*/ -+ la \PTR_A_REG,8(\PTR_A_REG) -+ madb %f1,%f2,0(\PTR_B_REG) -+ la \PTR_B_REG,8(\PTR_B_REG) -+.endm -+ -+/*Calculate for 1x1_4 C blocks*/ -+.macro CALC_1x1_4 PTR_A_REG,PTR_B_REG -+ ld %f2,0(\PTR_A_REG) /**a*/ -+ madb %f1,%f2,0(\PTR_B_REG) -+ -+ ld %f2,8(\PTR_A_REG) /**a*/ -+ madb %f1,%f2,8(\PTR_B_REG) -+ -+ ld %f2,16(\PTR_A_REG) /**a*/ -+ madb %f1,%f2,16(\PTR_B_REG) -+ -+ ld %f2,24(\PTR_A_REG) /**a*/ -+ madb %f1,%f2,24(\PTR_B_REG) -+ -+ la \PTR_A_REG,32(\PTR_A_REG) -+ la \PTR_B_REG,32(\PTR_B_REG) -+.endm -+ -+/*STORE C1X1*/ -+.macro STORE_1x1 ALPHA_FLOAT,CIJ_REG,LDC_BYTE_ORIGINAL -+ ld %f2,0(CIJ_LOCAL) -+ madbr %f2,%f1,\ALPHA_FLOAT -+ std %f2,0(CIJ_LOCAL) -+ la \CIJ_REG,8(\CIJ_REG) -+.endm -+ -+/*STORE C1X1*/ -+.macro STORE_TRMM_1x1 ALPHA_FLOAT,CIJ_REG,LDC_BYTE_ORIGINAL -+ mdbr %f1,\ALPHA_FLOAT -+ std %f1,0(CIJ_LOCAL) -+ la \CIJ_REG,8(\CIJ_REG) -+.endm -+ -+ -+/****************************TRMM POINTER REFRESH MACROSES*************************/ -+ -+.macro RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B -+ #if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) -+ /* ptrbb = bb;*/ -+ lgr \PTR_B,\B_VAL /*refresh BPOINT*/ -+ -+ #else -+ /* ptrba =ptrba+ off*C_A; -+ ptrbb = bb + off*C_B;*/ -+.if \C_B==4 -+ .if \C_A==8 -+ sllg \PTR_B, \OFF_VAL,5 -+ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*4*/ -+ agr \PTR_A,\PTR_B /*ptrba+off*4**/ -+ la \PTR_B,0(\B_VAL,\PTR_B) -+ .elseif \C_A==4 -+ sllg \PTR_B, \OFF_VAL,5 -+ agr \PTR_A,\PTR_B /*ptrba+off*4**/ -+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ -+ .elseif \C_A==2 -+ sllg \PTR_B, \OFF_VAL,4 -+ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*2**/ -+ agr \PTR_B, \PTR_B -+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ -+ -+ .elseif \C_A==1 -+ sllg \PTR_B, \OFF_VAL,3 -+ agr \PTR_A,\PTR_B /*ptrba+off*4**/ -+ sllg \PTR_B, \OFF_VAL,5 -+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ -+ .endif -+ -+.elseif \C_B==2 -+ .if \C_A==8 -+ sllg \PTR_B, \OFF_VAL,6 -+ agr \PTR_A,\PTR_B /*ptrba+off*8**/ -+ sllg \PTR_B, \OFF_VAL,4 -+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ -+ .elseif \C_A==4 -+ sllg \PTR_B, \OFF_VAL,4 -+ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*2**/ -+ agr \PTR_A,\PTR_B /*ptrba+off*2**/ -+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ -+ .elseif \C_A==2 -+ sllg \PTR_B, \OFF_VAL,4 -+ agr \PTR_A,\PTR_B /*ptrba+off*2**/ -+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ -+ .elseif \C_A==1 -+ sllg \PTR_B, \OFF_VAL,3 -+ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*1**/ -+ agr \PTR_B,\PTR_B /* off+off**/ -+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ -+ .endif -+ -+.elseif \C_B==1 -+ .if \C_A==8 -+ sllg \PTR_B, \OFF_VAL,6 -+ agr \PTR_A,\PTR_B /*ptrba+off*8**/ -+ sllg \PTR_B, \OFF_VAL,3 -+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ -+ .elseif \C_A==4 -+ sllg \PTR_B, \OFF_VAL,5 -+ agr \PTR_A,\PTR_B /*ptrba+off*4**/ -+ sllg \PTR_B, \OFF_VAL,3 -+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ -+ .elseif \C_A==2 -+ sllg \PTR_B, \OFF_VAL,3 -+ la \PTR_A,0(\PTR_A,\PTR_B) /*ptrba+off*1**/ -+ agr \PTR_A,\PTR_B /*ptrba+off*1**/ -+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ -+ -+ .elseif \C_A==1 -+ sllg \PTR_B, \OFF_VAL,3 -+ agr \PTR_A,\PTR_B /*ptrba+off*1**/ -+ la \PTR_B,0(\B_VAL,\PTR_B) /*refresh BPOINT*/ -+ .endif -+.endif -+ -+ -+ #endif -+.endm -+ -+/**/ -+.macro RefreshTempBk TEMP_VAL,BK_VAL,OFF_VAL,INCR_A,INCR_B -+ #if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) -+ /* temp = bk-off;*/ -+ sgrk \TEMP_VAL,\BK_VAL,\OFF_VAL -+ -+ #elif defined(LEFT) -+ /* temp = 
off+INCR_A; // number of values in A */ -+ la \TEMP_VAL,\INCR_A(\OFF_VAL) -+ #else -+ /* temp = off+INCR_B // number of values in B*/ -+ la \TEMP_VAL,\INCR_B(\OFF_VAL) -+ #endif -+ -+.endm -+ -+ -+.macro RefreshPointersAndOFF TEMP_VAL,BK_VAL,OFF_VAL,PTR_B,PTR_A,C_A,C_B -+ -+ #if ( defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA)) -+ /*temp = bk - off;*/ -+ sgrk \TEMP_VAL,\BK_VAL,\OFF_VAL -+ #ifdef LEFT -+ /*temp -= 8; // number of values in A*/ -+ lay \TEMP_VAL,-\C_A(\TEMP_VAL) -+ #else -+ /*temp -= 4; // number of values in B*/ -+ lay \TEMP_VAL,-\C_B(\TEMP_VAL) -+ #endif -+ /*ptrba += temp*C_A; -+ ptrbb += temp*C_B;*/ -+ .if \C_B==4 -+ .if \C_A==8 -+ sllg \TEMP_VAL, \TEMP_VAL,5 /*temp*4*/ -+ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ -+ agr \PTR_A, \TEMP_VAL /*ptrba+temp*C_A*/ -+ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ -+ .elseif \C_A==4 -+ sllg \TEMP_VAL, \TEMP_VAL,5 /*temp*4*/ -+ agr \PTR_B, \TEMP_VAL /*ptrbb+temp*C_B*/ -+ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ -+ .elseif \C_A==2 -+ sllg \TEMP_VAL, \TEMP_VAL,4 /*temp*2*/ -+ agr \PTR_B, \TEMP_VAL /*ptrbb+temp*C_B*/ -+ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ -+ agr \PTR_B, \TEMP_VAL /*ptrbb+temp*C_B*/ -+ .elseif \C_A==1 -+ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ -+ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ -+ sllg \TEMP_VAL, \TEMP_VAL,2 /*temp*2*2*/ -+ agr \PTR_B, \TEMP_VAL /*ptrbb+temp*C_B*/ -+ .endif -+ .elseif \C_B==2 -+ .if \C_A==8 -+ sllg \TEMP_VAL, \TEMP_VAL,4 /*temp*2*/ -+ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ -+ sllg \TEMP_VAL, \TEMP_VAL,2 /*temp*2*4 */ -+ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ -+ .elseif \C_A==4 -+ sllg \TEMP_VAL, \TEMP_VAL,4 /*temp*2*/ -+ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ -+ agr \TEMP_VAL, \TEMP_VAL -+ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ -+ .elseif \C_A==2 -+ sllg \TEMP_VAL, \TEMP_VAL,4 /*temp*2*/ -+ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ -+ agr \PTR_A, \TEMP_VAL /*ptrba+temp*C_A*/ -+ .elseif \C_A==1 -+ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ -+ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ -+ agr \PTR_A, \TEMP_VAL /*ptrba+temp*C_A*/ -+ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ -+ .endif -+ .elseif \C_B==1 -+ .if \C_A==8 -+ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ -+ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ -+ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*8 */ -+ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ -+ .elseif \C_A==4 -+ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ -+ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ -+ sllg \TEMP_VAL, \TEMP_VAL,2 /*temp*1*4 */ -+ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ -+ .elseif \C_A==2 -+ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ -+ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ -+ agr \TEMP_VAL, \TEMP_VAL -+ la \PTR_A,0(\PTR_A,\TEMP_VAL) /*ptrba+temp*C_A*/ -+ .elseif \C_A==1 -+ sllg \TEMP_VAL, \TEMP_VAL,3 /*temp*1*/ -+ la \PTR_B,0(\PTR_B,\TEMP_VAL) /*ptrbb+temp*C_B*/ -+ agr \PTR_A, \TEMP_VAL /*ptrba+temp*C_A*/ -+ .endif -+ .endif -+ #endif -+ -+ #ifdef LEFT -+ /*off += 8; // number of values in A*/ -+ aghi \OFF_VAL,\C_A -+ #endif -+.endm -\ No newline at end of file -diff --git a/kernel/zarch/trmm8x4V.S b/kernel/zarch/trmm8x4V.S -new file mode 100644 -index 00000000..8e6a03c1 ---- /dev/null -+++ b/kernel/zarch/trmm8x4V.S -@@ -0,0 +1,877 @@ -+/*************************************************************************** -+Copyright (c) 2013-2017, The OpenBLAS Project -+All rights reserved. 
-+Redistribution and use in source and binary forms, with or without -+modification, are permitted provided that the following conditions are -+met: -+1. Redistributions of source code must retain the above copyright -+notice, this list of conditions and the following disclaimer. -+2. Redistributions in binary form must reproduce the above copyright -+notice, this list of conditions and the following disclaimer in -+the documentation and/or other materials provided with the -+distribution. -+3. Neither the name of the OpenBLAS project nor the names of -+its contributors may be used to endorse or promote products -+derived from this software without specific prior written permission. -+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE -+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+*****************************************************************************/ -+ -+/************************************************************************************** -+* 2017/01/01 AbdelRauf (quickwritereader@gmail.com) -+* BLASTEST : OK -+* CTEST : OK -+* TEST : OK -+**************************************************************************************/ -+ -+/*********************************************************************/ -+/* Copyright 2009, 2010 The University of Texas at Austin. */ -+/* All rights reserved. */ -+/* */ -+/* Redistribution and use in source and binary forms, with or */ -+/* without modification, are permitted provided that the following */ -+/* conditions are met: */ -+/* */ -+/* 1. Redistributions of source code must retain the above */ -+/* copyright notice, this list of conditions and the following */ -+/* disclaimer. */ -+/* */ -+/* 2. Redistributions in binary form must reproduce the above */ -+/* copyright notice, this list of conditions and the following */ -+/* disclaimer in the documentation and/or other materials */ -+/* provided with the distribution. */ -+/* */ -+/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ -+/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ -+/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ -+/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ -+/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ -+/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ -+/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ -+/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ -+/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ -+/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ -+/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ -+/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ -+/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ -+/* POSSIBILITY OF SUCH DAMAGE. 
*/ -+/* */ -+/* The views and conclusions contained in the software and */ -+/* documentation are those of the authors and should not be */ -+/* interpreted as representing official policies, either expressed */ -+/* or implied, of The University of Texas at Austin. */ -+/*********************************************************************/ -+ -+#define ASSEMBLER -+#include "common.h" -+ -+/************** Notes ON IBM abi and IBM assembly********************************************** -+* General registers r0 and r1 should be used internally whenever possible -+* General registers r2 to r5 should be second choice -+* General registers r12 to r15 should only be used for their standard function. -+* r0 should not be used as address disp register -+ -+#BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc -+ ##bm=r2,bn=r3, bk=r4, alpha=f0,ba=r5,bb=r6,stack[160] ,ldc=stack[168] -+offset=stack[176] -+**********************************************************************************************/ -+ -+ -+#define BM %r2 -+#define BM_CUR %r0 -+#define BN %r3 -+#define BN_CUR %r10 -+#define BK %r4 -+#define LDC_BYTE %r8 -+#define ALPHA %f0 -+#define ALPHA_VECT %v0 -+#define LOCAL_VAR1 %r9 -+#define LOCAL_VAR2 %r1 -+#define LOCAL_VAR3 %r11 -+#define A %r5 -+#define B %r6 -+#define CIJ %r7 -+#define CIJ_LOCAL %r12 -+#define OFF %r13 -+#define OFFSET %f8 -+#define ALIGN_4 .align 16 -+#define ALIGN_2 .align 8 -+#define PREFETCH_INS 1 -+ -+/**************************Include kernel helper macrosses**********************************/ -+#include "kernelMacros.S" -+ -+#if defined (TRMMKERNEL) -+ -+#define STORE_8x4 STORE_TRMM_8x4 -+#define STORE_4x4 STORE_TRMM_4x4 -+#define STORE_2x4 STORE_TRMM_2x4 -+#define STORE_1x4 STORE_TRMM_1x4 -+ -+#define STORE_8x2 STORE_TRMM_8x2 -+#define STORE_4x2 STORE_TRMM_4x2 -+#define STORE_2x2 STORE_TRMM_2x2 -+#define STORE_1x2 STORE_TRMM_1x2 -+ -+#define STORE_8x1 STORE_TRMM_8x1 -+#define STORE_4x1 STORE_TRMM_4x1 -+#define STORE_2x1 STORE_TRMM_2x1 -+#define STORE_1x1 STORE_TRMM_1x1 -+ -+#endif -+ -+/***********************************DGEMM***********************************************************/ -+ -+PROLOGUE -+#if defined(TRMMKERNEL) -+stmg %r6,%r13,40(%r15) -+#else -+stmg %r6,%r12,40(%r15) -+#endif -+lg CIJ, 160(%r15) -+lg LOCAL_VAR1, 168(%r15) -+#if defined(TRMMKERNEL) -+lg OFF,176(%r15) -+std OFFSET,32(%r15) -+ldgr OFFSET ,OFF -+#endif -+srlg BN_CUR,BN,2 -+vrepg ALPHA_VECT,ALPHA_VECT,0 /*replicate alpha which in f0*/ -+ -+sllg LDC_BYTE, LOCAL_VAR1,3 /*calculate lcd stride with bytes double=8 x<<3 */ -+#if defined(TRMMKERNEL) && !defined(LEFT) -+ /*off = -offset;*/ -+ lgdr LOCAL_VAR1,OFFSET -+ lcgr OFF,LOCAL_VAR1 -+#endif -+cijle BN_CUR,0,.LX2 -+ -+ALIGN_4 -+.LX4_BN: -+#if defined(PREFETCH_INS) -+ pfd 1, 0(A) -+ pfd 1, 256(A) -+ pfd 1, 0(B) -+ pfd 1, 256(B) -+#endif -+#if defined(TRMMKERNEL) && defined(LEFT) -+ /*off = offset;*/ -+ lgdr OFF,OFFSET -+#endif -+srlg BM_CUR,BM,3 -+lgr LOCAL_VAR3,A -+lgr CIJ_LOCAL,CIJ -+cijle BM_CUR,0,.L4x4 -+ALIGN_4 -+.L8x4_BM: /*BM_CUR LOOP */ -+ -+#if defined(TRMMKERNEL) -+ -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,8,4 -+ -+ RefreshTempBk LOCAL_VAR1,BK,OFF,8,4 -+ srl LOCAL_VAR1,2 -+ -+#else -+ srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+ lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ -+ZERO_CVEC_8x4 -+cijle LOCAL_VAR1,0,.L8x4_mod -+ -+ -+ALIGN_4 -+.L8x4_4_BK: /*BK_CUR LOOP */ -+#if defined(PREFETCH_INS) -+ pfd 1, 512(LOCAL_VAR3) -+#endif -+ 
CALC_8x4_4 LOCAL_VAR3,LOCAL_VAR2 -+#if defined(PREFETCH_INS) -+ pfd 1, 512(LOCAL_VAR2) -+#endif -+brctg LOCAL_VAR1,.L8x4_4_BK -+ -+ALIGN_4 -+.L8x4_mod: -+#if defined(TRMMKERNEL) -+ RefreshTempBk LOCAL_VAR1,BK,OFF,8,4 -+ nill LOCAL_VAR1,3 -+#else -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+#endif -+jz .L8x4_BK_Store -+ -+ALIGN_4 -+.L8x4_BK: /*BK_CUR LOOP */ -+ CALC_8x4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L8x4_BK -+ -+ALIGN_4 -+.L8x4_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_8x4 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE -+#if defined(TRMMKERNEL) -+ /*RefreshPointersAndOFF TEMP_VAL,BK_VAL,OFF_VAL,L_VAR,PTR_A,C_A*/ -+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,8,4 -+#endif -+brctg BM_CUR,.L8x4_BM -+ -+ALIGN_4 -+.L4x4: -+ -+tmll BM,4 -+jz .L2x4 -+ -+ALIGN_4 -+.L4x4_BM: /*BM start*/ -+#if defined(TRMMKERNEL) -+ -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,4,4 -+ RefreshTempBk LOCAL_VAR1,BK,OFF,4,4 -+ srl LOCAL_VAR1,2 -+ -+#else -+ srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+ lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ZERO_CVEC_4x4 -+cijle LOCAL_VAR1,0,.L4x4_mod -+ -+ALIGN_4 -+.L4x4_4_BK: /*BK_CUR LOOP */ -+ CALC_4x4_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x4_4_BK -+ -+ALIGN_4 -+.L4x4_mod: -+#if defined(TRMMKERNEL) -+ RefreshTempBk LOCAL_VAR1,BK,OFF,4,4 -+ nill LOCAL_VAR1,3 -+#else -+ la LOCAL_VAR1,3(0,0) -+ NGR LOCAL_VAR1,BK /*refresh BK*/ -+#endif -+jz .L4x4_BK_Store -+ -+ALIGN_4 -+.L4x4_BK: /*BK_CUR LOOP */ -+ CALC_4x4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x4_BK -+ -+ALIGN_4 -+.L4x4_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_4x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+#if defined(TRMMKERNEL) -+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,4,4 -+#endif -+ALIGN_2 -+.L2x4: -+ -+tmll BM,2 -+jz .L1x4 -+ -+ALIGN_4 -+.L2x4_BM: /*BM start*/ -+#if defined(TRMMKERNEL) -+ -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,2,4 -+ -+ RefreshTempBk LOCAL_VAR1,BK,OFF,2,4 -+ srl LOCAL_VAR1,2 -+ -+#else -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ZERO_CVEC_2x4 -+cijle LOCAL_VAR1,0,.L2x4_mod -+ -+ALIGN_4 -+.L2x4_4_BK: /*BK_CUR LOOP */ -+ CALC_2x4_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x4_4_BK -+ -+ALIGN_4 -+.L2x4_mod: -+#if defined(TRMMKERNEL) -+ RefreshTempBk LOCAL_VAR1,BK,OFF,2,4 -+ nill LOCAL_VAR1,3 -+#else -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+#endif -+jz .L2x4_BK_Store -+ -+ALIGN_4 -+.L2x4_BK: /*BK_CUR LOOP */ -+ CALC_2x4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x4_BK -+ -+ALIGN_4 -+.L2x4_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_2x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+#if defined(TRMMKERNEL) -+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,2,4 -+#endif -+ -+ALIGN_4 -+.L1x4: -+ -+tmll BM,1 -+jz .Lx4_INNER_END -+ -+ALIGN_4 -+.L1x4_BM: /*BM start*/ -+#if defined(TRMMKERNEL) -+ -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,1,4 -+ RefreshTempBk LOCAL_VAR1,BK,OFF,1,4 -+ srl LOCAL_VAR1,2 -+ -+#else -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ZERO_CVEC_1x4 -+cijle LOCAL_VAR1,0,.L1x4_mod -+ -+ALIGN_4 -+.L1x4_4_BK: /*BK_CUR LOOP */ -+ CALC_1x4_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x4_4_BK -+ -+ALIGN_4 -+.L1x4_mod: -+#if defined(TRMMKERNEL) -+ 
RefreshTempBk LOCAL_VAR1,BK,OFF,1,4 -+ nill LOCAL_VAR1,3 -+#else -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+#endif -+jz .L1x4_BK_Store -+ -+ALIGN_4 -+.L1x4_BK: /*BK_CUR LOOP */ -+ CALC_1x4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x4_BK -+ -+ALIGN_4 -+.L1x4_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_1x4 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+#if defined(TRMMKERNEL) -+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,1,4 -+#endif -+ALIGN_2 -+.Lx4_INNER_END: -+ -+ -+/*add LDC_BYTE_COPY to new*/ -+sllg LOCAL_VAR1,LDC_BYTE,2 /*multiply*4 */ -+#if defined(TRMMKERNEL) && !defined(LEFT) -+ aghi OFF,4 -+#endif -+sllg LOCAL_VAR2,BK,5 /*muyliply*4*sizeof(double) =multiply*32* 2**5 */ -+la CIJ,0(CIJ,LOCAL_VAR1) /*refresh CIJ=CIJ+LDC_BYTE*4*/ -+la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*4*sizeof(double) */ -+ -+brctg BN_CUR,.LX4_BN -+ -+/*********************************X2 SECTION************************************************/ -+ALIGN_4 -+.LX2: -+tmll BN,2 -+jz .Lx1 -+ -+ALIGN_4 -+.Lx2_BN: -+ -+#if defined(TRMMKERNEL) && defined(LEFT) -+ /*off = offset;*/ -+ lgdr OFF,OFFSET -+#endif -+ -+srlg BM_CUR,BM,3 -+lgr LOCAL_VAR3,A -+lgr CIJ_LOCAL,CIJ -+cijle BM_CUR,0,.L4x2 -+ -+ -+ALIGN_4 -+.L8x2_BM: /*BM_CUR LOOP */ -+#if defined(TRMMKERNEL) -+ -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,8,2 -+ RefreshTempBk LOCAL_VAR1,BK,OFF,8,2 -+ srl LOCAL_VAR1,2 -+ -+#else -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ZERO_CVEC_8x2 -+cijle LOCAL_VAR1,0,.L8x2_mod -+ -+ALIGN_4 -+.L8x2_4_BK: /*BK_CUR LOOP */ -+#if defined(PREFETCH_INS) -+ pfd 1, 256(LOCAL_VAR3) -+ pfd 1,64(LOCAL_VAR2) -+#endif -+ CALC_8x2_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L8x2_4_BK -+ -+ALIGN_4 -+.L8x2_mod: -+#if defined(TRMMKERNEL) -+ RefreshTempBk LOCAL_VAR1,BK,OFF,8,2 -+ nill LOCAL_VAR1,3 -+#else -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+#endif -+jz .L8x2_BK_Store -+ -+ALIGN_4 -+.L8x2_BK: /*BK_CUR LOOP */ -+ CALC_8x2 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L8x2_BK -+ -+ALIGN_4 -+.L8x2_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_8x2 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE -+#if defined(TRMMKERNEL) -+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,8,2 -+#endif -+ALIGN_4 -+brctg BM_CUR,.L8x2_BM -+ -+ALIGN_2 -+.L4x2: -+ -+tmll BM,4 -+jz .L2x2 -+ -+ALIGN_4 -+.L4x2_BM: /*BM start*/ -+#if defined(TRMMKERNEL) -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,4,2 -+ RefreshTempBk LOCAL_VAR1,BK,OFF,4,2 -+ srl LOCAL_VAR1,2 -+ -+#else -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ZERO_CVEC_4x2 -+cijle LOCAL_VAR1,0,.L4x2_mod -+ -+ALIGN_4 -+.L4x2_4_BK: /*BK_CUR LOOP */ -+ CALC_4x2_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x2_4_BK -+ -+ALIGN_4 -+.L4x2_mod: -+#if defined(TRMMKERNEL) -+ RefreshTempBk LOCAL_VAR1,BK,OFF,4,2 -+ nill LOCAL_VAR1,3 -+#else -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+#endif -+jz .L4x2_BK_Store -+ -+ALIGN_4 -+.L4x2_BK: /*BK_CUR LOOP */ -+ CALC_4x2 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x2_BK -+ -+ALIGN_4 -+.L4x2_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_4x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+#if defined(TRMMKERNEL) -+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,4,2 -+#endif -+ALIGN_2 -+.L2x2: -+ -+tmll BM,2 -+jz .L1x2 -+ -+ALIGN_4 -+.L2x2_BM: 
/*BM start*/ -+#if defined(TRMMKERNEL) -+ -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,2,2 -+ RefreshTempBk LOCAL_VAR1,BK,OFF,2,2 -+ srl LOCAL_VAR1,2 -+ -+#else -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ZERO_CVEC_2x2 -+cijle LOCAL_VAR1,0,.L2x2_mod -+ -+ALIGN_4 -+.L2x2_4_BK: /*BK_CUR LOOP */ -+ CALC_2x2_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x2_4_BK -+ -+ALIGN_4 -+.L2x2_mod: -+#if defined(TRMMKERNEL) -+ RefreshTempBk LOCAL_VAR1,BK,OFF,2,2 -+ nill LOCAL_VAR1,3 -+#else -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+#endif -+jz .L2x2_BK_Store -+ -+ALIGN_4 -+.L2x2_BK: /*BK_CUR LOOP */ -+ CALC_2x2 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x2_BK -+ -+ALIGN_4 -+.L2x2_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_2x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+#if defined(TRMMKERNEL) -+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,2,2 -+#endif -+ -+ALIGN_2 -+.L1x2: -+ -+tmll BM,1 -+jz .Lx2_INNER_END -+ -+ALIGN_4 -+.L1x2_BM: /*BM start*/ -+#if defined(TRMMKERNEL) -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,1,2 -+ RefreshTempBk LOCAL_VAR1,BK,OFF,1,2 -+ srl LOCAL_VAR1,2 -+ -+#else -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ZERO_CVEC_1x2 -+cijle LOCAL_VAR1,0,.L1x2_mod -+ -+ALIGN_4 -+.L1x2_4_BK: /*BK_CUR LOOP */ -+ CALC_1x2_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x2_4_BK -+ -+ALIGN_4 -+.L1x2_mod: -+#if defined(TRMMKERNEL) -+ RefreshTempBk LOCAL_VAR1,BK,OFF,1,2 -+ nill LOCAL_VAR1,3 -+#else -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+#endif -+jz .L1x2_BK_Store -+ -+ALIGN_4 -+.L1x2_BK: /*BK_CUR LOOP */ -+ CALC_1x2 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x2_BK -+ -+ALIGN_4 -+.L1x2_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_1x2 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+#if defined(TRMMKERNEL) -+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,1,2 -+#endif -+ALIGN_2 -+.Lx2_INNER_END: -+/*add LDC_BYTE_COPY to new*/ -+la LOCAL_VAR1,0(LDC_BYTE,LDC_BYTE) /*multiply*2 */ -+sllg LOCAL_VAR2,BK,4 /*muyliply*2*sizeof(double) =multiply*16* 2**4 */ -+la CIJ,0(CIJ,LOCAL_VAR1) /*refresh CIJ=CIJ+LDC_BYTE*4*/ -+#if defined(TRMMKERNEL) && !defined(LEFT) -+ aghi OFF,2 -+#endif -+la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*4*sizeof(double) */ -+ -+ -+ -+ -+/*********************************X1 SECTION************************************************/ -+ALIGN_2 -+.Lx1: -+tmll BN,1 -+jz .L_FUNC_END -+ -+ALIGN_4 -+.Lx1_BN: -+ -+#if defined(TRMMKERNEL) && defined(LEFT) -+ /*off = offset;*/ -+ lgdr OFF,OFFSET -+#endif -+srlg BM_CUR,BM,3 -+lgr LOCAL_VAR3,A -+lgr CIJ_LOCAL,CIJ -+cijle BM_CUR,0,.L4x1 -+ -+ -+ALIGN_4 -+.L8x1_BM: /*BM_CUR LOOP */ -+#if defined(TRMMKERNEL) -+ -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,8,1 -+ RefreshTempBk LOCAL_VAR1,BK,OFF,8,1 -+ srl LOCAL_VAR1,2 -+ -+#else -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ZERO_CVEC_8x1 -+cijle LOCAL_VAR1,0,.L8x1_mod -+ -+ALIGN_4 -+.L8x1_4_BK: /*BK_CUR LOOP */ -+#if defined(PREFETCH_INS) -+ pfd 1, 256(LOCAL_VAR3) -+#endif -+ CALC_8x1_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L8x1_4_BK -+ -+ALIGN_4 -+.L8x1_mod: -+#if defined(TRMMKERNEL) -+ RefreshTempBk LOCAL_VAR1,BK,OFF,8,1 -+ nill LOCAL_VAR1,3 -+#else -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK 
/*refresh BK*/ -+#endif -+jz .L8x1_BK_Store -+ -+ALIGN_4 -+.L8x1_BK: /*BK_CUR LOOP */ -+ CALC_8x1 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L8x1_BK -+ -+ALIGN_4 -+.L8x1_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_8x1 ALPHA_VECT,CIJ_LOCAL, LDC_BYTE -+ #if defined(TRMMKERNEL) -+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,8,1 -+#endif -+ALIGN_4 -+brctg BM_CUR,.L8x1_BM -+ -+ALIGN_2 -+.L4x1: -+ -+tmll BM,4 -+jz .L2x1 -+ -+ALIGN_4 -+.L4x1_BM: /*BM start*/ -+#if defined(TRMMKERNEL) -+ -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,4,1 -+ RefreshTempBk LOCAL_VAR1,BK,OFF,4,1 -+ srl LOCAL_VAR1,2 -+ -+#else -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ZERO_CVEC_4x1 -+cijle LOCAL_VAR1,0,.L4x1_mod -+ -+ALIGN_4 -+.L4x1_4_BK: /*BK_CUR LOOP */ -+ CALC_4x1_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x1_4_BK -+ -+ALIGN_4 -+.L4x1_mod: -+#if defined(TRMMKERNEL) -+ RefreshTempBk LOCAL_VAR1,BK,OFF,4,1 -+ nill LOCAL_VAR1,3 -+#else -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+#endif -+jz .L4x1_BK_Store -+ -+ALIGN_4 -+.L4x1_BK: /*BK_CUR LOOP */ -+ CALC_4x1 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L4x1_BK -+ -+ALIGN_4 -+.L4x1_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_4x1 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+ #if defined(TRMMKERNEL) -+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,4,1 -+#endif -+ALIGN_2 -+.L2x1: -+ -+tmll BM,2 -+jz .L1x1 -+ -+ALIGN_4 -+.L2x1_BM: /*BM start*/ -+#if defined(TRMMKERNEL) -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,2,1 -+ RefreshTempBk LOCAL_VAR1,BK,OFF,2,1 -+ srl LOCAL_VAR1,2 -+ -+#else -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ZERO_CVEC_2x1 -+cijle LOCAL_VAR1,0,.L2x1_mod -+ -+ALIGN_4 -+.L2x1_4_BK: /*BK_CUR LOOP */ -+ CALC_2x1_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x1_4_BK -+ -+ALIGN_4 -+.L2x1_mod: -+#if defined(TRMMKERNEL) -+ RefreshTempBk LOCAL_VAR1,BK,OFF,2,1 -+ nill LOCAL_VAR1,3 -+#else -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+#endif -+jz .L2x1_BK_Store -+ -+ALIGN_4 -+.L2x1_BK: /*BK_CUR LOOP */ -+ CALC_2x1 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L2x1_BK -+ -+ALIGN_4 -+.L2x1_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_2x1 ALPHA_VECT ,CIJ_LOCAL, LDC_BYTE -+#if defined(TRMMKERNEL) -+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,2,1 -+#endif -+ -+ALIGN_2 -+.L1x1: -+ -+tmll BM, 1 -+jz .Lx1_INNER_END -+ -+ALIGN_4 -+.L1x1_BM: /*BM start*/ -+#if defined(TRMMKERNEL) -+ /* RefreshPointers PTR_A,PTR_B,OFF_VAL,B_VAL,C_A,C_B */ -+ RefreshPointers LOCAL_VAR3,LOCAL_VAR2,OFF,B,1,1 -+ RefreshTempBk LOCAL_VAR1,BK,OFF,1,1 -+ srl LOCAL_VAR1,2 -+ -+#else -+srlg LOCAL_VAR1,BK,2 /*refresh BK*/ -+lgr LOCAL_VAR2,B /*refresh BPOINT*/ -+#endif -+ZERO_CVEC_1x1 -+cijle LOCAL_VAR1,0,.L1x1_mod -+ -+ALIGN_4 -+.L1x1_4_BK: /*BK_CUR LOOP */ -+ CALC_1x1_4 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x1_4_BK -+ -+ALIGN_4 -+.L1x1_mod: -+#if defined(TRMMKERNEL) -+ RefreshTempBk LOCAL_VAR1,BK,OFF,1,1 -+ nill LOCAL_VAR1,3 -+#else -+la LOCAL_VAR1,3(0,0) -+NGR LOCAL_VAR1,BK /*refresh BK*/ -+#endif -+jz .L1x1_BK_Store -+ -+ALIGN_4 -+.L1x1_BK: /*BK_CUR LOOP */ -+ CALC_1x1 LOCAL_VAR3,LOCAL_VAR2 -+brctg LOCAL_VAR1,.L1x1_BK -+ -+ALIGN_4 -+.L1x1_BK_Store: -+/*store C and use LDC_BYTE AND CIJ_COPY for mem storing*/ -+STORE_1x1 ALPHA 
,CIJ_LOCAL, LDC_BYTE
-+#if defined(TRMMKERNEL)
-+ RefreshPointersAndOFF LOCAL_VAR1,BK,OFF,LOCAL_VAR2,LOCAL_VAR3,1,1
-+#endif
-+ALIGN_2
-+.Lx1_INNER_END:
-+/*add LDC_BYTE_COPY to new*/
-+sllg LOCAL_VAR2,BK,3 /*muyliply*2*sizeof(double) =multiply*8* 2**3 */
-+la CIJ,0(CIJ,LDC_BYTE) /*refresh CIJ=CIJ+LDC_BYTE */
-+#if defined(TRMMKERNEL) && !defined(LEFT)
-+ aghi OFF,1
-+#endif
-+la B,0(B,LOCAL_VAR2) /*refresh B=B+Bk*1*sizeof(double) */
-+
-+
-+ALIGN_2
-+.L_FUNC_END:
-+/*end*/
-+#if defined(TRMMKERNEL)
-+ld %f8,32(%r15)
-+lmg %r6,%r13,40(%r15)
-+#else
-+lmg %r6,%r12,40(%r15)
-+#endif
-+br %r14
-+.end
-+
-+
-+
-+
-+
-+
-+
-diff --git a/param.h b/param.h
-index 0268fb5e..d28c63a9 100644
---- a/param.h
-+++ b/param.h
-@@ -2548,6 +2548,46 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- #define SYMV_P 16
- #endif
-
-+#if defined(Z13)
-+#define SNUMOPT 2
-+#define DNUMOPT 4
-+
-+#define GEMM_DEFAULT_OFFSET_A 0
-+#define GEMM_DEFAULT_OFFSET_B 0
-+#define GEMM_DEFAULT_ALIGN 0x03fffUL
-+
-+#define SGEMM_DEFAULT_UNROLL_M 2
-+#define SGEMM_DEFAULT_UNROLL_N 2
-+
-+#define DGEMM_DEFAULT_UNROLL_M 8
-+#define DGEMM_DEFAULT_UNROLL_N 4
-+
-+#define CGEMM_DEFAULT_UNROLL_M 2
-+#define CGEMM_DEFAULT_UNROLL_N 2
-+
-+#define ZGEMM_DEFAULT_UNROLL_M 2
-+#define ZGEMM_DEFAULT_UNROLL_N 2
-+
-+#define SGEMM_DEFAULT_P 128
-+ #define DGEMM_DEFAULT_P 320
-+#define CGEMM_DEFAULT_P 96
-+#define ZGEMM_DEFAULT_P 64
-+
-+#define SGEMM_DEFAULT_Q 240
-+#define DGEMM_DEFAULT_Q 384
-+#define CGEMM_DEFAULT_Q 120
-+#define ZGEMM_DEFAULT_Q 120
-+
-+#define SGEMM_DEFAULT_R 12288
-+#define DGEMM_DEFAULT_R 4096
-+#define CGEMM_DEFAULT_R 4096
-+#define ZGEMM_DEFAULT_R 4096
-+
-+
-+#define SYMV_P 16
-+#endif
-+
-+
-
- #ifdef GENERIC
-
---
-2.12.2
-
-
-From b489d350a1340d4aec3d2a7f9a97a588c118d670 Mon Sep 17 00:00:00 2001
-From: Abdurrauf
-Date: Wed, 4 Jan 2017 19:41:24 +0400
-Subject: [PATCH 4/6] Update README.md (cherry picked from commit
- 7f2a959e3eb7ce1a91a0f685021e3be0d9ee0552)
-
----
- README.md | 9 ++++++---
- 1 file changed, 6 insertions(+), 3 deletions(-)
-
-diff --git a/README.md b/README.md
-index 5428f0eb..af30a0c8 100644
---- a/README.md
-+++ b/README.md
-@@ -107,9 +107,12 @@ Please read GotoBLAS_01Readme.txt
- - **ARM Cortex-A57**: Experimental
-
- #### IBM zEnterprise System:
--- **Z13**: Double precision real number
-- git checkout z13
-- make USE_TRMM=1
-+- **Z13**: blas3 for double
-+```
-+ git checkout z13
-+ make USE_TRMM=1
-+```
-+
-
- ### Support OS:
- - **GNU/Linux**
---
-2.12.2
-
-
-From 0ba111288df793cafce7cb159d3a0e005cd59dfb Mon Sep 17 00:00:00 2001
-From: Zhang Xianyi
-Date: Mon, 9 Jan 2017 05:48:09 -0500
-Subject: [PATCH 5/6] Add USE_TRMM=1 for IBM z13 in kernel/Makefile.L3
-
-(cherry picked from commit 864e202afdc9761637b442f084f0f26039256fa4)
----
- README.md | 6 +-----
- kernel/Makefile.L3 | 4 ++++
- 2 files changed, 5 insertions(+), 5 deletions(-)
-
-diff --git a/README.md b/README.md
-index af30a0c8..1c3255fe 100644
---- a/README.md
-+++ b/README.md
-@@ -107,11 +107,7 @@ Please read GotoBLAS_01Readme.txt
- - **ARM Cortex-A57**: Experimental
-
- #### IBM zEnterprise System:
--- **Z13**: blas3 for double
--```
-- git checkout z13
-- make USE_TRMM=1
--```
-+- **Z13**: blas3 for double
-
-
- ### Support OS:
-diff --git a/kernel/Makefile.L3 b/kernel/Makefile.L3
-index e55f153f..86e692e5 100644
---- a/kernel/Makefile.L3
-+++ b/kernel/Makefile.L3
-@@ -36,6 +36,10 @@ ifeq ($(CORE), POWER8)
- USE_TRMM = 1
- endif
-
-+ifeq ($(CORE), Z13)
-+USE_TRMM = 1
-+endif
-+
-
-
-
---
-2.12.2
-
-
-From 02459e22d3b8b34dbaea5d7e2e822d3c47b8cdef Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Dan=20Hor=C3=A1k?=
-Date: Thu, 20 Apr 2017 21:13:41 +0200
-Subject: [PATCH 6/6] detect CPU on zArch
-
-(cherry picked from commit 81fed55782f0dd04649b1f0c4a44de85ac20162f)
---
- cpuid_zarch.c | 24 +++++++++++++++++++++---
- 1 file changed, 21 insertions(+), 3 deletions(-)
-
-diff --git a/cpuid_zarch.c b/cpuid_zarch.c
-index e2e3b046..4e193542 100644
---- a/cpuid_zarch.c
-+++ b/cpuid_zarch.c
-@@ -42,9 +42,27 @@ static char *cpuname_lower[] = {
-
- int detect(void)
- {
-- // return CPU_GENERIC;
-- return CPU_Z13;
--
-+ FILE *infile;
-+ char buffer[512], *p;
-+
-+ p = (char *)NULL;
-+ infile = fopen("/proc/sysinfo", "r");
-+ while (fgets(buffer, sizeof(buffer), infile)){
-+ if (!strncmp("Type", buffer, 4)){
-+ p = strchr(buffer, ':') + 2;
-+#if 0
-+ fprintf(stderr, "%s\n", p);
-+#endif
-+ break;
-+ }
-+ }
-+
-+ fclose(infile);
-+
-+ if (strstr(p, "2964")) return CPU_Z13;
-+ if (strstr(p, "2965")) return CPU_Z13;
-+
-+ return CPU_GENERIC;
- }
-
- void get_libname(void)
---
-2.12.2
-
diff --git a/openblas.spec b/openblas.spec
index dd4199c..3077a85 100644
--- a/openblas.spec
+++ b/openblas.spec
@@ -1,6 +1,6 @@
 %bcond_with system_lapack
 # Version of bundled lapack
-%global lapackver 3.5.0
+%global lapackver 3.7.0
 
 # DO NOT "CLEAN UP" OR MODIFY THIS SPEC FILE WITHOUT ASKING THE
 # MAINTAINER FIRST!
@@ -14,8 +14,8 @@
 # "obsoleted" features are still kept in the spec.
 
 Name: openblas
-Version: 0.2.19
-Release: 12%{?dist}
+Version: 0.2.20
+Release: 1%{?dist}
 Summary: An optimized BLAS library based on GotoBLAS2
 Group: Development/Libraries
 License: BSD
@@ -29,10 +29,6 @@ Patch1: openblas-0.2.5-libname.patch
 Patch2: openblas-0.2.15-constructor.patch
 # Supply the proper flags to the test makefile
 Patch3: openblas-0.2.19-tests.patch
-# From https://github.com/xianyi/OpenBLAS/issues/1078#issuecomment-279527810
-Patch4: openblas-0.2.19-fix_register_clobbers.patch
-# Backported support for s390x from the develop branch
-Patch5: openblas-0.2.19-s390x.patch
 
 BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
 
@@ -238,8 +234,6 @@ cd OpenBLAS-%{version}
 %patch2 -p1 -b .constructor
 %endif
 %patch3 -p1 -b .tests
-%patch4 -p1 -b .register_clobbers
-%patch5 -p1 -b .s390x
 
 # Fix source permissions
 find -name \*.f -exec chmod 644 {} \;
@@ -655,6 +649,9 @@ rm -rf %{buildroot}
 %endif
 
 %changelog
+* Fri Jul 28 2017 Susi Lehtola - 0.2.20-1
+- Update to 0.2.20.
+
 * Thu Jul 27 2017 Fedora Release Engineering - 0.2.19-12
 - Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild
 
diff --git a/sources b/sources
index f506c41..c7f66b4 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-28c998054fd377279741c6f0b9ea7941 v0.2.19.tar.gz
+SHA512 (v0.2.20.tar.gz) = 8dfc8e8c8d456b834d2e9544c8eadd9f4770e30db8b8dd76af601ec0735fd86c9cf63dd6a03ccd23fc02ec2e05069a09875b9073dfe29f99aadab3a958ae2634

From 356e5c2feea59b06f212b965c5740e53237bdc3f Mon Sep 17 00:00:00 2001
From: Susi Lehtola
Date: Sun, 30 Jul 2017 07:32:33 +0200
Subject: [PATCH 8/9] Remove installed pkgconfig file.

---
 openblas.spec | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/openblas.spec b/openblas.spec
index 3077a85..ed64287 100644
--- a/openblas.spec
+++ b/openblas.spec
@@ -546,6 +546,8 @@ done
 
 # Get rid of generated CMake config
 rm -rf %{buildroot}%{_libdir}/cmake
+# Get rid of generated pkgconfig file
+rm %{buildroot}/usr/lib/pkgconfig/openblas.pc
 
 %post -p /sbin/ldconfig
 %postun -p /sbin/ldconfig

From 5f1847d517499cf5a9cfb54d4045786b7bcaa892 Mon Sep 17 00:00:00 2001
From: Susi Lehtola
Date: Sun, 30 Jul 2017 21:09:03 +0200
Subject: [PATCH 9/9] New try.

---
 openblas.spec | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/openblas.spec b/openblas.spec
index ed64287..5cdb2cb 100644
--- a/openblas.spec
+++ b/openblas.spec
@@ -546,8 +546,8 @@ done
 
 # Get rid of generated CMake config
 rm -rf %{buildroot}%{_libdir}/cmake
-# Get rid of generated pkgconfig file
-rm %{buildroot}/usr/lib/pkgconfig/openblas.pc
+# Get rid of generated pkgconfig
+rm -rf %{buildroot}%{_libdir}/pkgconfig
 
 %post -p /sbin/ldconfig
 %postun -p /sbin/ldconfig
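
PATCH 9/9 replaces the hard-coded /usr/lib/pkgconfig path introduced in PATCH 8/9 with %{_libdir}/pkgconfig, which also covers 64-bit arches where %{_libdir} expands to /usr/lib64, and switches to rm -rf so the cleanup does not fail if the directory is absent. A minimal way to confirm the result against the buildroot after %install is sketched below; the find call is only an illustrative sanity check and is not part of the spec itself:

    # List any pkgconfig files still present in the buildroot.
    # Nothing should be printed once the rm -rf above has run.
    find %{buildroot} -name '*.pc' -print

If anything is listed, the path given to rm does not match where the build actually installed the generated .pc file.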