diff -rNU3 dist.orig/.gitignore dist/.gitignore
--- dist.orig/.gitignore 2013-01-10 16:48:46.000000000 +0100
+++ dist/.gitignore 1970-01-01 01:00:00.000000000 +0100
@@ -1,42 +0,0 @@
-*.diff
-*.patch
-*.orig
-*.rej
-
-*~
-.#*
-*#
-
-*.flt
-*.gmo
-*.info
-*.la
-*.lo
-*.o
-*.pyc
-*.tmp
-
-.deps
-.libs
-
-autom4te.cache
-config.cache
-config.h
-config.intl
-config.log
-config.status
-libtool
-POTFILES
-*-POTFILES
-
-TAGS
-TAGS.sub
-
-.gdbinit
-.gdb_history
-
-# ignore core files, but not java/net/protocol/core/
-core
-!core/
-
-lost+found
diff -rNU3 dist.orig/config.guess dist/config.guess
--- dist.orig/config.guess 2014-04-04 15:48:08.000000000 +0200
+++ dist/config.guess 2015-10-18 13:19:49.000000000 +0200
@@ -171,16 +171,32 @@
UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
/usr/sbin/$sysctl 2>/dev/null || echo unknown)`
case "${UNAME_MACHINE_ARCH}" in
+ earm*eb*) machine=armeb-unknown ;;
+ earm*) machine=arm-unknown ;;
armeb) machine=armeb-unknown ;;
arm*) machine=arm-unknown ;;
+ coldfire) machine=m5407-unknown ;;
+ earm*eb*) machine=armeb-unknown ;;
+ earm*) machine=arm-unknown ;;
sh3el) machine=shl-unknown ;;
sh3eb) machine=sh-unknown ;;
sh5el) machine=sh5le-unknown ;;
*) machine=${UNAME_MACHINE_ARCH}-unknown ;;
esac
# The Operating System including object format, if it has switched
- # to ELF recently, or will in the future.
+ # to ELF recently, or will in the future and ABI.
case "${UNAME_MACHINE_ARCH}" in
+ coldfire) os=netbsdelf ;;
+ earm*)
+ eval $set_cc_for_build
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ os=netbsdelf-eabi
+ else
+ os=netbsdelf-eabihf
+ fi
+ ;;
arm*|i386|m68k|ns32k|sh3*|sparc|vax)
eval $set_cc_for_build
if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
diff -rNU3 dist.orig/config.sub dist/config.sub
--- dist.orig/config.sub 2014-04-04 15:48:08.000000000 +0200
+++ dist/config.sub 2015-10-18 13:19:49.000000000 +0200
@@ -117,7 +117,7 @@
case $maybe_os in
nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
- knetbsd*-gnu* | netbsd*-gnu* | \
+ knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \
kopensolaris*-gnu* | \
storm-chaos* | os2-emx* | rtmk-nova*)
os=-$maybe_os
@@ -297,7 +297,7 @@
| nios | nios2 | nios2eb | nios2el \
| ns16k | ns32k \
| open8 \
- | or1k | or32 \
+ | or1k | or1knd \
| pdp10 | pdp11 | pj | pjl \
| powerpc | powerpc64 | powerpc64le | powerpcle \
| pyramid \
@@ -329,12 +329,21 @@
basic_machine=$basic_machine-unknown
os=-none
;;
- m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | m5407 \
+ | v70 | w65 | z8k)
;;
ms1)
basic_machine=mt-unknown
;;
+ riscv32-*)
+ basic_machine=riscv32-ucb
+ ;;
+
+ riscv*-*)
+ basic_machine=riscv-ucb
+ ;;
+
strongarm | thumb | xscale)
basic_machine=arm-unknown
;;
@@ -386,6 +395,7 @@
| le32-* | le64-* \
| lm32-* \
| m32c-* | m32r-* | m32rle-* \
+ | m5200-* | m5407-* \
| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
| m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
| microblaze-* | microblazeel-* \
@@ -920,8 +930,11 @@
basic_machine=hppa1.1-oki
os=-proelf
;;
- openrisc | openrisc-*)
- basic_machine=or32-unknown
+ or1k | or1k-*)
+ basic_machine=or1k-unknown
+ ;;
+ or1knd | or1knd-*)
+ basic_machine=or1knd-unknown
;;
os400)
basic_machine=powerpc-ibm
@@ -1597,8 +1610,8 @@
or1k-*)
os=-elf
;;
- or32-*)
- os=-coff
+ or1knd-*)
+ os=-elf
;;
*-tti) # must be before sparc entry or we get the wrong os.
os=-sysv3
diff -rNU3 dist.orig/configure dist/configure
--- dist.orig/configure 2015-05-03 19:26:29.000000000 +0200
+++ dist/configure 2015-10-18 13:19:49.000000000 +0200
@@ -2292,7 +2292,7 @@
for ac_t in install-sh install.sh shtool; do
if test -f "$ac_dir/$ac_t"; then
ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/$ac_t -c"
+ ac_install_sh="$SHELL $ac_aux_dir/$ac_t -c"
break 2
fi
done
@@ -6287,8 +6287,12 @@
if test $target_elf = yes; then :
# ELF platforms build the lto-plugin always.
- build_lto_plugin=yes
-
+ case $target in
+ m68010-*)
+ build_lto_plugin=no;;
+ *)
+ build_lto_plugin=yes;;
+ esac
else
if test x"$default_enable_lto" = x"yes" ; then
case $target in
diff -rNU3 dist.orig/configure.ac dist/configure.ac
--- dist.orig/configure.ac 2015-05-03 19:26:29.000000000 +0200
+++ dist/configure.ac 2015-10-18 13:19:49.000000000 +0200
@@ -1685,7 +1685,12 @@
enable_lto=yes; default_enable_lto=yes)
ACX_ELF_TARGET_IFELSE([# ELF platforms build the lto-plugin always.
- build_lto_plugin=yes
+ case $target in
+ m68010-*)
+ build_lto_plugin=no;;
+ *)
+ build_lto_plugin=yes;;
+ esac
],[if test x"$default_enable_lto" = x"yes" ; then
case $target in
*-apple-darwin9* | *-cygwin* | *-mingw*) ;;
diff -rNU3 dist.orig/fixincludes/configure dist/fixincludes/configure
--- dist.orig/fixincludes/configure 2012-05-29 21:28:57.000000000 +0200
+++ dist/fixincludes/configure 2015-10-18 13:19:49.000000000 +0200
@@ -2141,7 +2141,7 @@
for ac_t in install-sh install.sh shtool; do
if test -f "$ac_dir/$ac_t"; then
ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/$ac_t -c"
+ ac_install_sh="$SHELL $ac_aux_dir/$ac_t -c"
break 2
fi
done
diff -rNU3 dist.orig/gcc/Makefile.in dist/gcc/Makefile.in
--- dist.orig/gcc/Makefile.in 2014-04-05 12:26:19.000000000 +0200
+++ dist/gcc/Makefile.in 2015-10-18 13:19:51.000000000 +0200
@@ -639,6 +639,9 @@
exeext = @host_exeext@
build_exeext = @build_exeext@
+# NetBSD mknative-gcc addition
+ENABLE_SHARED = @enable_shared@
+
# Directory in which to put man pages.
mandir = @mandir@
man1dir = $(mandir)/man1
@@ -706,6 +709,7 @@
# Control whether header files are installed.
INSTALL_HEADERS=install-headers install-mkheaders
+INSTALL_HEADERS=install-headers
# Control whether Info documentation is built and installed.
BUILD_INFO = @BUILD_INFO@
@@ -747,8 +751,7 @@
# Native linker and preprocessor flags. For x-fragment overrides.
BUILD_LDFLAGS=@BUILD_LDFLAGS@
-BUILD_CPPFLAGS= -I. -I$(@D) -I$(srcdir) -I$(srcdir)/$(@D) \
- -I$(srcdir)/../include @INCINTL@ $(CPPINC) $(CPPFLAGS)
+BUILD_CPPFLAGS=$(BALL_CPPFLAGS)
# Actual name to use when installing a native compiler.
GCC_INSTALL_NAME := $(shell echo gcc|sed '$(program_transform_name)')
@@ -998,6 +1001,7 @@
# puts -I options in CPPFLAGS, our include files in the srcdir will always
# win against random include files in /usr/include.
ALL_CPPFLAGS = $(INCLUDES) $(CPPFLAGS)
+BALL_CPPFLAGS = $(BINCLUDES) $(CPPFLAGS)
# This is the variable to use when using $(COMPILER).
ALL_COMPILERFLAGS = $(ALL_CXXFLAGS)
@@ -1054,6 +1058,10 @@
-I$(srcdir)/../include @INCINTL@ \
$(CPPINC) $(GMPINC) $(DECNUMINC) $(BACKTRACEINC) \
$(CLOOGINC) $(ISLINC)
+BINCLUDES = -I. -I$(@D) -I$(srcdir) -I$(srcdir)/$(@D) \
+ -I$(srcdir)/../include @INCINTL@ \
+ $(CPPINC) $(DECNUMINC) $(BACKTRACEINC) \
+ $(CLOOGINC) $(ISLINC)
.c.o:
$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $< $(OUTPUT_OPTION)
@@ -1796,7 +1804,7 @@
checksum-options:
echo "$(LINKER) $(ALL_LINKERFLAGS) $(LDFLAGS)" > checksum-options.tmp \
- && $(srcdir)/../move-if-change checksum-options.tmp checksum-options
+ && $(SHELL) $(srcdir)/../move-if-change checksum-options.tmp checksum-options
#
# Build libgcc.a.
@@ -1804,7 +1812,7 @@
libgcc-support: libgcc.mvars stmp-int-hdrs $(TCONFIG_H) \
$(MACHMODE_H) gcov-iov.h
-libgcc.mvars: config.status Makefile specs xgcc$(exeext)
+libgcc.mvars: config.status Makefile
: > tmp-libgcc.mvars
echo GCC_CFLAGS = '$(GCC_CFLAGS)' >> tmp-libgcc.mvars
echo INHIBIT_LIBC_CFLAGS = '$(INHIBIT_LIBC_CFLAGS)' >> tmp-libgcc.mvars
@@ -1819,6 +1827,7 @@
s-mlib: $(srcdir)/genmultilib Makefile
if test @enable_multilib@ = yes \
|| test -n "$(MULTILIB_OSDIRNAMES)"; then \
+ CONFIG_SHELL="$(SHELL)" \
$(SHELL) $(srcdir)/genmultilib \
"$(MULTILIB_OPTIONS)" \
"$(MULTILIB_DIRNAMES)" \
@@ -3909,21 +3918,21 @@
gengtype-parse.o build/gengtype-parse.o : gengtype-parse.c gengtype.h \
$(SYSTEM_H)
-gengtype-parse.o: $(CONFIG_H)
+gengtype-parse.o: $(CONFIG_H) $(BCONFIG_H)
CFLAGS-gengtype-parse.o += -DGENERATOR_FILE
build/gengtype-parse.o: $(BCONFIG_H)
gengtype-state.o build/gengtype-state.o: gengtype-state.c $(SYSTEM_H) \
gengtype.h errors.h double-int.h version.h $(HASHTAB_H) $(OBSTACK_H) \
$(XREGEX_H)
-gengtype-state.o: $(CONFIG_H)
+gengtype-state.o: $(CONFIG_H) $(BCONFIG_H)
CFLAGS-gengtype-state.o += -DGENERATOR_FILE
build/gengtype-state.o: $(BCONFIG_H)
gengtype.o build/gengtype.o : gengtype.c $(SYSTEM_H) gengtype.h \
rtl.def insn-notes.def errors.h double-int.h version.h $(HASHTAB_H) \
$(OBSTACK_H) $(XREGEX_H)
-gengtype.o: $(CONFIG_H)
+gengtype.o: $(CONFIG_H) $(BCONFIG_H)
CFLAGS-gengtype.o += -DGENERATOR_FILE
build/gengtype.o: $(BCONFIG_H)
@@ -4061,6 +4070,12 @@
# s-* so that mostlyclean does not force the include directory to
# be rebuilt.
+unwind.h: $(UNWIND_H)
+ -if [ -d include ] ; then true; else mkdir include; chmod a+rx include; fi
+ rm -f include/unwind.h
+ cp $(UNWIND_H) include/unwind.h
+ chmod a+r include/unwind.h
+
# Build the include directories.
stmp-int-hdrs: $(STMP_FIXINC) $(USER_H) fixinc_list
# Copy in the headers provided with gcc.
@@ -4076,6 +4091,7 @@
# e.g. install-no-fixedincludes.
-if [ -d include ] ; then true; else mkdir include; chmod a+rx include; fi
-if [ -d include-fixed ] ; then true; else mkdir include-fixed; chmod a+rx include-fixed; fi
+ if false; then \
for file in .. $(USER_H); do \
if [ X$$file != X.. ]; then \
realfile=`echo $$file | sed -e 's|.*/\([^/]*\)$$|\1|'`; \
@@ -4084,7 +4100,7 @@
cp $$file include; \
chmod a+r include/$$realfile; \
fi; \
- done
+ done; \
for file in .. $(USER_H_INC_NEXT_PRE); do \
if [ X$$file != X.. ]; then \
mv include/$$file include/x_$$file; \
@@ -4093,14 +4109,14 @@
rm -f include/x_$$file; \
chmod a+r include/$$file; \
fi; \
- done
+ done; \
for file in .. $(USER_H_INC_NEXT_POST); do \
if [ X$$file != X.. ]; then \
echo "#include_next <$$file>" >>include/$$file; \
chmod a+r include/$$file; \
fi; \
- done
- rm -f include/stdint.h
+ done; \
+ rm -f include/stdint.h; \
if [ $(USE_GCC_STDINT) = wrap ]; then \
rm -f include/stdint-gcc.h; \
cp $(srcdir)/ginclude/stdint-gcc.h include/stdint-gcc.h; \
@@ -4110,7 +4126,7 @@
elif [ $(USE_GCC_STDINT) = provide ]; then \
cp $(srcdir)/ginclude/stdint-gcc.h include/stdint.h; \
chmod a+r include/stdint.h; \
- fi
+ fi; \
set -e; for ml in `cat fixinc_list`; do \
sysroot_headers_suffix=`echo $${ml} | sed -e 's/;.*$$//'`; \
multi_dir=`echo $${ml} | sed -e 's/^[^;]*;//'`; \
@@ -4127,7 +4143,8 @@
rm -f $${fix_dir}/limits.h; \
cp -p tmp-limits.h $${fix_dir}/limits.h; \
chmod a+r $${fix_dir}/limits.h; \
- done
+ done; \
+ fi
# Install the README
rm -f include-fixed/README
cp $(srcdir)/../fixincludes/README-fixinc include-fixed/README
@@ -4187,10 +4204,11 @@
# Abort if no system headers available, unless building a crosscompiler.
# FIXME: abort unless building --without-headers would be more accurate and less ugly
stmp-fixinc: gsyslimits.h macro_list fixinc_list \
- $(build_objdir)/fixincludes/fixincl \
+ $(build_objdir)/fixincludes/fixincl$(build_exeext) \
$(build_objdir)/fixincludes/fixinc.sh
- rm -rf include-fixed; mkdir include-fixed
- -chmod a+rx include-fixed
+ if false; then \
+ rm -rf include-fixed; mkdir include-fixed; \
+ chmod a+rx include-fixed; \
if [ -d ../prev-gcc ]; then \
cd ../prev-gcc && \
$(MAKE) real-$(INSTALL_HEADERS_DIR) DESTDIR=`pwd`/../gcc/ \
@@ -4224,6 +4242,7 @@
fi; \
chmod a+r $${fix_dir}/syslimits.h; \
done; \
+ fi; \
fi
$(STAMP) stmp-fixinc
#
@@ -4710,6 +4729,8 @@
lang.install-info
$(DESTDIR)$(infodir)/%.info: doc/%.info installdirs
+ @echo "NOT REBUILDING $@"
+NetBSD_DISABLED_info:
rm -f $@
if [ -f $< ]; then \
for f in $(<)*; do \
diff -rNU3 dist.orig/gcc/c/Make-lang.in dist/gcc/c/Make-lang.in
--- dist.orig/gcc/c/Make-lang.in 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/c/Make-lang.in 2015-10-18 13:19:49.000000000 +0200
@@ -70,7 +70,7 @@
$(C_OBJS) $(BACKEND) $(LIBDEPS)
build/genchecksum$(build_exeext) $(C_OBJS) $(BACKEND) $(LIBDEPS) \
checksum-options > cc1-checksum.c.tmp && \
- $(srcdir)/../move-if-change cc1-checksum.c.tmp cc1-checksum.c
+ $(SHELL) $(srcdir)/../move-if-change cc1-checksum.c.tmp cc1-checksum.c
cc1-checksum.o : cc1-checksum.c $(CONFIG_H) $(SYSTEM_H)
diff -rNU3 dist.orig/gcc/c-family/c-opts.c dist/gcc/c-family/c-opts.c
--- dist.orig/gcc/c-family/c-opts.c 2015-02-11 13:14:54.000000000 +0100
+++ dist/gcc/c-family/c-opts.c 2015-10-18 13:19:49.000000000 +0200
@@ -284,6 +284,10 @@
cpp_opts->discard_comments_in_macro_exp = 0;
break;
+ case OPT_cxx_isystem:
+ add_path (xstrdup (arg), SYSTEM, 1, true);
+ break;
+
case OPT_D:
defer_opt (code, arg);
break;
@@ -606,6 +610,10 @@
add_path (xstrdup (arg), QUOTE, 0, true);
break;
+ case OPT_iremap:
+ add_cpp_remap_path (arg);
+ break;
+
case OPT_isysroot:
sysroot = arg;
break;
diff -rNU3 dist.orig/gcc/c-family/c.opt dist/gcc/c-family/c.opt
--- dist.orig/gcc/c-family/c.opt 2014-04-07 08:40:18.000000000 +0200
+++ dist/gcc/c-family/c.opt 2015-10-18 13:19:49.000000000 +0200
@@ -801,6 +801,12 @@
C ObjC C++ ObjC++
A synonym for -std=c89 (for C) or -std=c++98 (for C++)
+; This should really just be C++/ObjC++ but we (NetBSD) use it when
+; calling C and ObjC compilers as well.
+cxx-isystem
+C ObjC C++ ObjC++ Joined Separate MissingArgError(missing path after %qs)
+-cxx-isystem Add <dir> to the start of the C++ system include path
+
d
C ObjC C++ ObjC++ Joined
; Documented in common.opt. FIXME - what about -dI, -dD, -dN and -dD?
@@ -1277,6 +1283,10 @@
C ObjC C++ ObjC++ Joined Separate MissingArgError(missing path after %qs)
-iquote <dir> Add <dir> to the end of the quote include path
+iremap
+C ObjC C++ ObjC++ Joined Separate MissingArgError(missing path after %qs)
+-iremap <src>:<dst> Convert <src> to <dst> if it occurs as prefix in __FILE__.
+
iwithprefix
C ObjC C++ ObjC++ Joined Separate
-iwithprefix Add to the end of the system include path
diff -rNU3 dist.orig/gcc/cfgexpand.c dist/gcc/cfgexpand.c
--- dist.orig/gcc/cfgexpand.c 2015-01-27 18:07:24.000000000 +0100
+++ dist/gcc/cfgexpand.c 2015-10-18 13:19:49.000000000 +0200
@@ -1321,7 +1321,9 @@
else
len = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
- if (len < max)
+ if (len == 0)
+ ret = SPCT_HAS_ARRAY;
+ else if (len < max)
ret = SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_ARRAY;
else
ret = SPCT_HAS_LARGE_CHAR_ARRAY | SPCT_HAS_ARRAY;
diff -rNU3 dist.orig/gcc/common/config/arm/arm-common.c dist/gcc/common/config/arm/arm-common.c
--- dist.orig/gcc/common/config/arm/arm-common.c 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/common/config/arm/arm-common.c 2015-10-18 13:19:49.000000000 +0200
@@ -48,6 +48,9 @@
return UI_SJLJ;
#endif
+ if (ARM_DWARF_UNWIND_TABLES)
+ return UI_DWARF2;
+
/* If not using ARM EABI unwind tables... */
if (ARM_UNWIND_INFO)
{
diff -rNU3 dist.orig/gcc/common/config/or1k/or1k-common.c dist/gcc/common/config/or1k/or1k-common.c
--- dist.orig/gcc/common/config/or1k/or1k-common.c 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/common/config/or1k/or1k-common.c 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,30 @@
+/* Common hooks for OR1K.
+ Copyright (C) 1987-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "common/common-target.h"
+#include "common/common-target-def.h"
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
+
+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
diff -rNU3 dist.orig/gcc/common/config/riscv/riscv-common.c dist/gcc/common/config/riscv/riscv-common.c
--- dist.orig/gcc/common/config/riscv/riscv-common.c 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/common/config/riscv/riscv-common.c 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,129 @@
+/* Common hooks for RISC-V.
+ Copyright (C) 1989-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "common/common-target.h"
+#include "common/common-target-def.h"
+#include "opts.h"
+#include "flags.h"
+#include "errors.h"
+
+/* Parse a RISC-V ISA string into an option mask. */
+
+static void
+riscv_parse_arch_string (const char *isa, int *flags)
+{
+ const char *p = isa;
+
+ if (strncmp (p, "RV32", 4) == 0)
+ *flags |= MASK_32BIT, p += 4;
+ else if (strncmp (p, "RV64", 4) == 0)
+ *flags &= ~MASK_32BIT, p += 4;
+
+ if (*p++ != 'I')
+ {
+ error ("-march=%s: ISA strings must begin with I, RV32I, or RV64I", isa);
+ return;
+ }
+
+ *flags &= ~MASK_MULDIV;
+ if (*p == 'M')
+ *flags |= MASK_MULDIV, p++;
+
+ *flags &= ~MASK_ATOMIC;
+ if (*p == 'A')
+ *flags |= MASK_ATOMIC, p++;
+
+ *flags |= MASK_SOFT_FLOAT_ABI;
+ if (*p == 'F')
+ *flags &= ~MASK_SOFT_FLOAT_ABI, p++;
+
+ if (*p == 'D')
+ {
+ p++;
+ if (!TARGET_HARD_FLOAT)
+ {
+ error ("-march=%s: the D extension requires the F extension", isa);
+ return;
+ }
+ }
+ else if (TARGET_HARD_FLOAT)
+ {
+ error ("-march=%s: single-precision-only is not yet supported", isa);
+ return;
+ }
+
+ if (*p)
+ {
+ error ("-march=%s: unsupported ISA substring %s", isa, p);
+ return;
+ }
+}
+
+static int
+riscv_flags_from_arch_string (const char *isa)
+{
+ int flags = 0;
+ riscv_parse_arch_string (isa, &flags);
+ return flags;
+}
+
+/* Implement TARGET_HANDLE_OPTION. */
+
+static bool
+riscv_handle_option (struct gcc_options *opts,
+ struct gcc_options *opts_set ATTRIBUTE_UNUSED,
+ const struct cl_decoded_option *decoded,
+ location_t loc ATTRIBUTE_UNUSED)
+{
+ switch (decoded->opt_index)
+ {
+ case OPT_march_:
+ riscv_parse_arch_string (decoded->arg, &opts->x_target_flags);
+ return true;
+
+ default:
+ return true;
+ }
+}
+
+/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
+static const struct default_options riscv_option_optimization_table[] =
+ {
+ { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
+ { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
+ { OPT_LEVELS_NONE, 0, NULL, 0 }
+ };
+
+#undef TARGET_OPTION_OPTIMIZATION_TABLE
+#define TARGET_OPTION_OPTIMIZATION_TABLE riscv_option_optimization_table
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS \
+ (TARGET_DEFAULT \
+ | riscv_flags_from_arch_string (RISCV_ARCH_STRING_DEFAULT) \
+ | (TARGET_64BIT_DEFAULT ? 0 : MASK_32BIT))
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION riscv_handle_option
+
+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
diff -rNU3 dist.orig/gcc/config/alpha/alpha.h dist/gcc/config/alpha/alpha.h
--- dist.orig/gcc/config/alpha/alpha.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/alpha/alpha.h 2015-10-18 13:19:50.000000000 +0200
@@ -1067,6 +1067,12 @@
#define ASM_OUTPUT_SOURCE_FILENAME(STREAM, NAME) \
alpha_output_filename (STREAM, NAME)
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+ ( fputs (".comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (int)(ROUNDED)))
+
+
/* By default, turn on GDB extensions. */
#define DEFAULT_GDB_EXTENSIONS 1
diff -rNU3 dist.orig/gcc/config/alpha/elf.h dist/gcc/config/alpha/elf.h
--- dist.orig/gcc/config/alpha/elf.h 2014-07-25 09:28:47.000000000 +0200
+++ dist/gcc/config/alpha/elf.h 2015-10-18 13:19:50.000000000 +0200
@@ -18,6 +18,29 @@
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
+#undef OBJECT_FORMAT_COFF
+#undef EXTENDED_COFF
+#define OBJECT_FORMAT_ELF
+
+/* ??? Move all SDB stuff from alpha.h to osf.h. */
+#undef SDB_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+
+#define DWARF2_DEBUGGING_INFO 1
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+#undef ASM_FINAL_SPEC
+
+/* alpha/ doesn't use elfos.h for some reason. */
+#define TARGET_OBJFMT_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__ELF__"); \
+ } \
+ while (0)
+
#undef CC1_SPEC
#define CC1_SPEC "%{G*}"
@@ -167,6 +190,6 @@
As of Jan 2002, only glibc 2.2.4 can actually make use of this, but
I imagine that other systems will catch up. In the meantime, it
doesn't harm to make sure that the data exists to be used later. */
-#if defined(HAVE_LD_EH_FRAME_HDR)
+#if defined(HAVE_LD_EH_FRAME_HDR) && !defined(LINK_EH_SPEC)
#define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
#endif
diff -rNU3 dist.orig/gcc/config/alpha/netbsd.h dist/gcc/config/alpha/netbsd.h
--- dist.orig/gcc/config/alpha/netbsd.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/alpha/netbsd.h 2015-10-18 13:19:50.000000000 +0200
@@ -57,6 +57,15 @@
#define NETBSD_ENTRY_POINT "__start"
+/* Provide a STARTFILE_SPEC appropriate for NetBSD. Here we add the
+ (even more) magical crtbegin.o file which provides part of the
+ support for getting C++ file-scope static object constructed
+ before entering `main'. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: %{pg|p:gcrt0.o%s;:crt0.o%s}}\
+ crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
/* Provide an ENDFILE_SPEC appropriate for NetBSD/alpha ELF. Here we
add crtend.o, which provides part of the support for getting
diff -rNU3 dist.orig/gcc/config/arm/arm.h dist/gcc/config/arm/arm.h
--- dist.orig/gcc/config/arm/arm.h 2015-01-14 12:02:24.000000000 +0100
+++ dist/gcc/config/arm/arm.h 2015-10-18 13:19:50.000000000 +0200
@@ -889,6 +889,11 @@
#define ARM_UNWIND_INFO 0
#endif
+/* Overriden by config/arm/netbsd-eabi.h. */
+#ifndef ARM_DWARF_UNWIND_TABLES
+#define ARM_DWARF_UNWIND_TABLES 0
+#endif
+
/* Use r0 and r1 to pass exception handling information. */
#define EH_RETURN_DATA_REGNO(N) (((N) < 2) ? N : INVALID_REGNUM)
@@ -899,11 +904,21 @@
#ifndef ARM_TARGET2_DWARF_FORMAT
#define ARM_TARGET2_DWARF_FORMAT DW_EH_PE_pcrel
+# if ARM_DWARF_UNWIND_TABLES
+/* DWARF unwinding uses the normal indirect/pcrel vs absptr format
+ for 32bit platforms. */
+# define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
+ ((flag_pic \
+ && ((GLOBAL) || (CODE))) \
+ ? ((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4 \
+ : DW_EH_PE_absptr)
+# else
/* ttype entries (the only interesting data references used)
use TARGET2 relocations. */
-#define ASM_PREFERRED_EH_DATA_FORMAT(code, data) \
- (((code) == 0 && (data) == 1 && ARM_UNWIND_INFO) ? ARM_TARGET2_DWARF_FORMAT \
- : DW_EH_PE_absptr)
+# define ASM_PREFERRED_EH_DATA_FORMAT(code, data) \
+ (((code) == 0 && (data) == 1 && ARM_UNWIND_INFO) ? ARM_TARGET2_DWARF_FORMAT \
+ : DW_EH_PE_absptr)
+# endif
#endif
/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
@@ -2315,7 +2330,7 @@
/* -mcpu=native handling only makes sense with compiler running on
an ARM chip. */
-#if defined(__arm__)
+#if defined(__arm__) && defined(__linux__)
extern const char *host_detect_local_cpu (int argc, const char **argv);
# define EXTRA_SPEC_FUNCTIONS \
{ "local_cpu_detect", host_detect_local_cpu },
diff -rNU3 dist.orig/gcc/config/arm/bpabi-netbsd.c dist/gcc/config/arm/bpabi-netbsd.c
--- dist.orig/gcc/config/arm/bpabi-netbsd.c 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/arm/bpabi-netbsd.c 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1 @@
+#include "bpabi.c"
diff -rNU3 dist.orig/gcc/config/arm/bpabi.h dist/gcc/config/arm/bpabi.h
--- dist.orig/gcc/config/arm/bpabi.h 2013-01-18 15:26:15.000000000 +0100
+++ dist/gcc/config/arm/bpabi.h 2015-10-18 13:19:50.000000000 +0200
@@ -19,12 +19,16 @@
<http://www.gnu.org/licenses/>. */
/* Use the AAPCS ABI by default. */
+#undef ARM_DEFAULT_ABI
#define ARM_DEFAULT_ABI ARM_ABI_AAPCS
/* Assume that AAPCS ABIs should adhere to the full BPABI. */
+#undef TARGET_BPABI
#define TARGET_BPABI (TARGET_AAPCS_BASED)
/* BPABI targets use EABI frame unwinding tables. */
+#undef ARM_EABI_UNWIND_TABLES
+#define ARM_EABI_UNWIND_TABLES 1
#undef ARM_UNWIND_INFO
#define ARM_UNWIND_INFO 1
diff -rNU3 dist.orig/gcc/config/arm/elf.h dist/gcc/config/arm/elf.h
--- dist.orig/gcc/config/arm/elf.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/arm/elf.h 2015-10-18 13:19:50.000000000 +0200
@@ -154,6 +154,8 @@
#undef L_floatdidf
#undef L_floatdisf
#undef L_floatundidf
+/* XXXMRG: don't take this out, we need it! */
+# ifndef __NetBSD__
#undef L_floatundisf
+# endif
#endif
-
diff -rNU3 dist.orig/gcc/config/arm/netbsd-eabi.h dist/gcc/config/arm/netbsd-eabi.h
--- dist.orig/gcc/config/arm/netbsd-eabi.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/arm/netbsd-eabi.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,113 @@
+/* Definitions of target machine for GNU compiler, NetBSD/arm ELF version.
+ Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Contributed by Wasabi Systems, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Run-time Target Specification. */
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mabi=aapcs-linux" }
+
+#define TARGET_LINKER_EABI_SUFFIX \
+ (TARGET_DEFAULT_FLOAT_ABI == ARM_FLOAT_ABI_SOFT \
+ ? "%{!mabi=apcs-gnu:%{!mabi=atpcs:%{mfloat-abi=hard:_eabihf;:_eabi}}}" \
+ : "%{!mabi=apcs-gnu:%{!mabi=atpcs:%{mfloat-abi=soft:_eabi;:_eabihf}}}")
+#define TARGET_LINKER_BIG_EMULATION "armelfb_nbsd%(linker_eabi_suffix)"
+#define TARGET_LINKER_LITTLE_EMULATION "armelf_nbsd%(linker_eabi_suffix)"
+
+/* TARGET_BIG_ENDIAN_DEFAULT is set in
+ config.gcc for big endian configurations. */
+#undef TARGET_LINKER_EMULATION
+#if TARGET_BIG_ENDIAN_DEFAULT
+#define TARGET_LINKER_EMULATION TARGET_LINKER_BIG_EMULATION
+#undef BE8_LINK_SPEC
+#define BE8_LINK_SPEC " %{!mlittle-endian:%{march=armv7-a|mcpu=cortex-a5|mcpu=cortex-a8|mcpu=cortex-a9:%{!r:--be8}}}"
+#else
+#define TARGET_LINKER_EMULATION TARGET_LINKER_LITTLE_EMULATION
+#endif
+
+#undef ARM_DEFAULT_ABI
+#define ARM_DEFAULT_ABI ARM_ABI_AAPCS_LINUX
+
+#undef ARM_EABI_UNWIND_TABLES
+#define ARM_EABI_UNWIND_TABLES 0
+#undef ARM_UNWIND_INFO
+#define ARM_UNWIND_INFO 0
+#undef ARM_DWARF_UNWIND_TABLES
+#define ARM_DWARF_UNWIND_TABLES 1
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ if (TARGET_AAPCS_BASED) \
+ TARGET_BPABI_CPP_BUILTINS(); \
+ NETBSD_OS_CPP_BUILTINS_ELF(); \
+ if (ARM_DWARF_UNWIND_TABLES) \
+ builtin_define ("__ARM_DWARF_EH__"); \
+ if (ARM_EABI_UNWIND_TABLES) \
+ builtin_define ("__UNWIND_TABLES__"); \
+ } \
+ while (0)
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC NETBSD_CPP_SPEC
+
+/*
+ * Override AAPCS types to remain compatible the existing NetBSD types.
+ */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+#undef SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC \
+ "-matpcs %{mabi=apcs-gnu|mabi=atpcs:-meabi=gnu} %{fpic|fpie:-k} %{fPIC|fPIE:-k}"
+
+/* Default to full VFP if -mhard-float is specified. */
+#undef SUBTARGET_ASM_FLOAT_SPEC
+#define SUBTARGET_ASM_FLOAT_SPEC \
+ "%{mhard-float:%{!mfpu=*:-mfpu=vfp}} \
+ %{mfloat-abi=hard:%{!mfpu=*:-mfpu=vfp}}"
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "subtarget_extra_asm_spec", SUBTARGET_EXTRA_ASM_SPEC }, \
+ { "subtarget_asm_float_spec", SUBTARGET_ASM_FLOAT_SPEC }, \
+ { "netbsd_link_spec", NETBSD_LINK_SPEC_ELF }, \
+ { "linker_eabi_suffix", TARGET_LINKER_EABI_SUFFIX }, \
+ { "linker_emulation", TARGET_LINKER_EMULATION }, \
+ { "linker_big_emulation", TARGET_LINKER_BIG_EMULATION }, \
+ { "linker_little_emulation", TARGET_LINKER_LITTLE_EMULATION }, \
+ { "be8_link_spec", BE8_LINK_SPEC }, \
+ { "target_fix_v4bx_spec", TARGET_FIX_V4BX_SPEC }, \
+ { "netbsd_entry_point", NETBSD_ENTRY_POINT },
+
+#define NETBSD_ENTRY_POINT "__start"
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "-X %{mbig-endian:-EB -m %(linker_big_emulation)} \
+ %{mlittle-endian:-EL -m %(linker_little_emulation)} \
+ %{!mbig-endian:%{!mlittle-endian:-m %(linker_emulation)}} \
+ %(be8_link_spec) %(target_fix_v4bx_spec) \
+ %(netbsd_link_spec)"
diff -rNU3 dist.orig/gcc/config/arm/netbsd-elf.h dist/gcc/config/arm/netbsd-elf.h
--- dist.orig/gcc/config/arm/netbsd-elf.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/arm/netbsd-elf.h 2015-10-18 13:19:50.000000000 +0200
@@ -22,9 +22,20 @@
/* arm.h defaults to ARM6 CPU. */
-/* This defaults us to little-endian. */
-#ifndef TARGET_ENDIAN_DEFAULT
-#define TARGET_ENDIAN_DEFAULT 0
+/* Default EABI to armv5t so that thumb shared libraries work.
+ The ARM926EJ-S core is the default for armv5te, so set
+ SUBTARGET_CPU_DEFAULT to achieve this. */
+
+#define SUBTARGET_CPU_DEFAULT \
+ (ARM_DEFAULT_ABI != ARM_ABI_APCS && ARM_DEFAULT_ABI != ARM_ABI_ATPCS \
+ ? TARGET_CPU_arm926ejs : TARGET_CPU_arm6)
+
+/* TARGET_BIG_ENDIAN_DEFAULT is set in
+ config.gcc for big endian configurations. */
+#if TARGET_BIG_ENDIAN_DEFAULT
+#define TARGET_ENDIAN_DEFAULT MASK_BIG_END
+#else
+#define TARGET_ENDIAN_DEFAULT 0
#endif
#undef MULTILIB_DEFAULTS
@@ -38,6 +49,7 @@
#undef ARM_DEFAULT_ABI
#define ARM_DEFAULT_ABI ARM_ABI_ATPCS
+#undef TARGET_OS_CPP_BUILTINS
#define TARGET_OS_CPP_BUILTINS() \
do \
{ \
@@ -50,12 +62,13 @@
#undef SUBTARGET_EXTRA_ASM_SPEC
#define SUBTARGET_EXTRA_ASM_SPEC \
- "-matpcs %{fpic|fpie:-k} %{fPIC|fPIE:-k}"
+ "-matpcs %{mabi=aapcs*:-meabi=5} %{fpic|fpie:-k} %{fPIC|fPIE:-k}"
/* Default to full VFP if -mfloat-abi=hard is specified. */
#undef SUBTARGET_ASM_FLOAT_SPEC
#define SUBTARGET_ASM_FLOAT_SPEC \
- "%{mfloat-abi=hard:{!mfpu=*:-mfpu=vfp}}"
+ "%{mhard-float:%{!mfpu=*:-mfpu=vfp}} \
+ %{mfloat-abi=hard:%{!mfpu=*:-mfpu=vfp}}"
#undef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS \
@@ -68,7 +81,9 @@
#undef LINK_SPEC
#define LINK_SPEC \
- "-X %{mbig-endian:-EB} %{mlittle-endian:-EL} \
+ "-X \
+ %{mbig-endian:-EB %{-mabi=aapcs*:-m armelfb_nbsd_eabi}} \
+ %{mlittle-endian:-EL %{-mabi=aapcs*:-m armelf_nbsd_eabi}} \
%(netbsd_link_spec)"
/* Make GCC agree with . */
@@ -79,6 +94,12 @@
#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE "long int"
+#undef INTPTR_TYPE
+#define INTPTR_TYPE PTRDIFF_TYPE
+
+#undef UINTPTR_TYPE
+#define UINTPTR_TYPE SIZE_TYPE
+
/* We don't have any limit on the length as out debugger is GDB. */
#undef DBX_CONTIN_LENGTH
diff -rNU3 dist.orig/gcc/config/arm/t-arm dist/gcc/config/arm/t-arm
--- dist.orig/gcc/config/arm/t-arm 2013-03-07 00:29:08.000000000 +0100
+++ dist/gcc/config/arm/t-arm 2015-10-18 13:19:50.000000000 +0200
@@ -66,6 +66,8 @@
$(srcdir)/config/arm/arm-tune.md: $(srcdir)/config/arm/gentune.sh \
$(srcdir)/config/arm/arm-cores.def
+ @echo "NOT REBUILDING $@"
+NetBSD_DISABLED_config_arm_arm-tune.md:
$(SHELL) $(srcdir)/config/arm/gentune.sh \
$(srcdir)/config/arm/arm-cores.def > \
$(srcdir)/config/arm/arm-tune.md
@@ -73,6 +75,8 @@
$(srcdir)/config/arm/arm-tables.opt: $(srcdir)/config/arm/genopt.sh \
$(srcdir)/config/arm/arm-cores.def $(srcdir)/config/arm/arm-arches.def \
$(srcdir)/config/arm/arm-fpus.def
+ @echo "NOT REBUILDING $@"
+NetBSD_DISABLED_config_arm_arm-tables.opt:
$(SHELL) $(srcdir)/config/arm/genopt.sh $(srcdir)/config/arm > \
$(srcdir)/config/arm/arm-tables.opt
diff -rNU3 dist.orig/gcc/config/arm/t-netbsdeabi dist/gcc/config/arm/t-netbsdeabi
--- dist.orig/gcc/config/arm/t-netbsdeabi 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/arm/t-netbsdeabi 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,8 @@
+# NetBSD has (will have) "non-native" libraries in /usr/lib/.
+
+MULTILIB_OPTIONS = mabi=aapcs-linux/mabi=apcs-gnu
+MULTILIB_DIRNAMES = eabi oabi
+MULTILIB_OSDIRNAMES = . ../lib/oabi
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff -rNU3 dist.orig/gcc/config/freebsd-spec.h dist/gcc/config/freebsd-spec.h
--- dist.orig/gcc/config/freebsd-spec.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/freebsd-spec.h 2015-10-18 13:19:50.000000000 +0200
@@ -133,6 +133,14 @@
#define FBSD_DYNAMIC_LINKER "/libexec/ld-elf.so.1"
#endif
+#if defined(HAVE_LD_EH_FRAME_HDR) && !defined(LINK_EH_SPEC)
+#define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
+#endif
+
+/* Use --as-needed -lgcc_s for eh support. */
+#ifdef HAVE_LD_AS_NEEDED
+#define USE_LD_AS_NEEDED 1
+#endif
/* NOTE: The freebsd-spec.h header is included also for various
non-FreeBSD powerpc targets, thus it should never define macros
other than FBSD_* prefixed ones, or USING_CONFIG_FREEBSD_SPEC. */
diff -rNU3 dist.orig/gcc/config/host-netbsd.c dist/gcc/config/host-netbsd.c
--- dist.orig/gcc/config/host-netbsd.c 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/host-netbsd.c 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,85 @@
+/* NetBSD host-specific hook definitions.
+ Copyright (C) 2004-2013 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "hosthooks.h"
+#include "hosthooks-def.h"
+
+
+#undef HOST_HOOKS_GT_PCH_GET_ADDRESS
+#define HOST_HOOKS_GT_PCH_GET_ADDRESS netbsd_gt_pch_get_address
+#undef HOST_HOOKS_GT_PCH_USE_ADDRESS
+#define HOST_HOOKS_GT_PCH_USE_ADDRESS netbsd_gt_pch_use_address
+
+/* For various ports, try to guess a fixed spot in the vm space
+ that's probably free. */
+#if defined(__sparc64__)
+# define TRY_EMPTY_VM_SPACE 0x40000000000
+#elif defined(_LP64)
+# define TRY_EMPTY_VM_SPACE 0x400000000000
+#elif defined(__mips__) || defined(__vax__) || defined (__arm__)
+# define TRY_EMPTY_VM_SPACE 0x60000000
+#else
+# define TRY_EMPTY_VM_SPACE 0xb0000000
+#endif
+
+/* Determine a location where we might be able to reliably allocate
+ SIZE bytes. FD is the PCH file, though we should return with the
+ file unmapped. */
+
+static void *
+netbsd_gt_pch_get_address (size_t size, int fd)
+{
+ void *addr;
+
+ addr = mmap ((void *) TRY_EMPTY_VM_SPACE, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_FIXED, fd, 0);
+
+ /* If we failed the map, that means there's *no* free space. */
+ if (addr == (void *) MAP_FAILED)
+ return NULL;
+ /* Unmap the area before returning. */
+ munmap (addr, size);
+
+ return addr;
+}
+
+/* Map SIZE bytes of FD+OFFSET at BASE. Return 1 if we succeeded at
+ mapping the data at BASE, -1 if we couldn't. */
+
+static int
+netbsd_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
+{
+ void *addr;
+
+ /* We're called with size == 0 if we're not planning to load a PCH
+ file at all. This allows the hook to free any static space that
+ we might have allocated at link time. */
+ if (size == 0)
+ return -1;
+
+ addr = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd, offset);
+
+ return addr == base ? 1 : -1;
+}
+
+
+const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff -rNU3 dist.orig/gcc/config/i386/i386.h dist/gcc/config/i386/i386.h
--- dist.orig/gcc/config/i386/i386.h 2014-01-08 20:54:29.000000000 +0100
+++ dist/gcc/config/i386/i386.h 2015-10-18 13:19:50.000000000 +0200
@@ -1071,6 +1071,7 @@
#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
(CC_REGNO_P (REGNO) ? VOIDmode \
+ : MMX_REGNO_P (REGNO) ? V8QImode \
: (MODE) == VOIDmode && (NREGS) != 1 ? VOIDmode \
: (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS), false) \
: (MODE) == HImode && !TARGET_PARTIAL_REG_STALL ? SImode \
diff -rNU3 dist.orig/gcc/config/i386/netbsd-elf.h dist/gcc/config/i386/netbsd-elf.h
--- dist.orig/gcc/config/i386/netbsd-elf.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/i386/netbsd-elf.h 2015-10-18 13:19:50.000000000 +0200
@@ -118,4 +118,10 @@
we don't care about compatibility with older gcc versions. */
#define DEFAULT_PCC_STRUCT_RETURN 1
-#define HAVE_ENABLE_EXECUTE_STACK
+#undef X87_ENABLE_ARITH
+#define X87_ENABLE_ARITH(MODE) \
+ (flag_excess_precision == EXCESS_PRECISION_FAST || (MODE) == DFmode)
+
+/* Preserve i386 psABI */
+#undef PREFERRED_STACK_BOUNDARY_DEFAULT
+#define PREFERRED_STACK_BOUNDARY_DEFAULT MIN_STACK_BOUNDARY
diff -rNU3 dist.orig/gcc/config/i386/netbsd64.h dist/gcc/config/i386/netbsd64.h
--- dist.orig/gcc/config/i386/netbsd64.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/i386/netbsd64.h 2015-10-18 13:19:50.000000000 +0200
@@ -66,4 +66,8 @@
fprintf (FILE, "\tcall __mcount\n"); \
}
+/* Preserve i386 psABI */
+#undef PREFERRED_STACK_BOUNDARY_DEFAULT
+#define PREFERRED_STACK_BOUNDARY_DEFAULT MIN_STACK_BOUNDARY
+
#define HAVE_ENABLE_EXECUTE_STACK
diff -rNU3 dist.orig/gcc/config/i386/pmm_malloc.h dist/gcc/config/i386/pmm_malloc.h
--- dist.orig/gcc/config/i386/pmm_malloc.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/i386/pmm_malloc.h 2015-10-18 13:19:50.000000000 +0200
@@ -31,7 +31,7 @@
#ifndef __cplusplus
extern int posix_memalign (void **, size_t, size_t);
#else
-extern "C" int posix_memalign (void **, size_t, size_t) throw ();
+extern "C" int posix_memalign (void **, size_t, size_t);
#endif
static __inline void *
diff -rNU3 dist.orig/gcc/config/i386/t-netbsd64 dist/gcc/config/i386/t-netbsd64
--- dist.orig/gcc/config/i386/t-netbsd64 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/i386/t-netbsd64 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,15 @@
+# NetBSD has (will have) "non-native" libraries in /usr/lib/.
+# For NetBSD/amd64 we thus have /usr/lib and /usr/lib/i386.
+
+MULTILIB_OPTIONS = m64/m32
+MULTILIB_DIRNAMES = 64 32
+MULTILIB_OSDIRNAMES = . ../lib/i386
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# The pushl in CTOR initialization interferes with frame pointer elimination.
+# crtend*.o cannot be compiled without -fno-asynchronous-unwind-tables,
+# because then __FRAME_END__ might not be the last thing in .eh_frame
+# section.
+CRTSTUFF_T_CFLAGS += -fno-omit-frame-pointer -fno-asynchronous-unwind-tables
diff -rNU3 dist.orig/gcc/config/ia64/netbsd.h dist/gcc/config/ia64/netbsd.h
--- dist.orig/gcc/config/ia64/netbsd.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/ia64/netbsd.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,56 @@
+/* Definitions of target machine for GNU compiler,
+ for ia64/ELF NetBSD systems.
+ Copyright (C) 2005 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ NETBSD_OS_CPP_BUILTINS_ELF(); \
+ } \
+ while (0)
+
+
+/* Extra specs needed for NetBSD/ia-64 ELF. */
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "netbsd_cpp_spec", NETBSD_CPP_SPEC }, \
+ { "netbsd_link_spec", NETBSD_LINK_SPEC_ELF }, \
+ { "netbsd_entry_point", NETBSD_ENTRY_POINT },
+
+
+/* Provide a LINK_SPEC appropriate for a NetBSD/ia64 ELF target. */
+
+#undef LINK_SPEC
+#define LINK_SPEC "%(netbsd_link_spec)"
+
+#define NETBSD_ENTRY_POINT "_start"
+
+
+/* Provide a CPP_SPEC appropriate for NetBSD. */
+
+#undef CPP_SPEC
+#define CPP_SPEC "%(netbsd_cpp_spec)"
+
+
+#if 0
+/* Attempt to enable execute permissions on the stack. */
+#define TRANSFER_FROM_TRAMPOLINE NETBSD_ENABLE_EXECUTE_STACK
+#endif
diff -rNU3 dist.orig/gcc/config/m68k/m68k.md dist/gcc/config/m68k/m68k.md
--- dist.orig/gcc/config/m68k/m68k.md 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/m68k/m68k.md 2015-10-18 13:19:50.000000000 +0200
@@ -3124,16 +3124,33 @@
;; We need a separate DEFINE_EXPAND for u?mulsidi3 to be able to use the
;; proper matching constraint. This is because the matching is between
;; the high-numbered word of the DImode operand[0] and operand[1].
+;;
+;; Note: life_analysis() does not keep track of the individual halves of the
+;; DImode register. To prevent spurious liveness before the u?mulsidi3 insn
+;; (which causes "uninitialized variable" warnings), we explicitly clobber
+;; the DImode register.
(define_expand "umulsidi3"
- [(parallel
- [(set (subreg:SI (match_operand:DI 0 "register_operand" "") 4)
- (mult:SI (match_operand:SI 1 "register_operand" "")
- (match_operand:SI 2 "register_operand" "")))
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" ""))))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "")
+
+(define_insn_and_split "*umulsidi3_split"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" ""))))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "#"
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ [(clobber (match_dup 0))
+ (parallel
+ [(set (subreg:SI (match_dup 0) 4)
+ (mult:SI (match_dup 1) (match_dup 2)))
(set (subreg:SI (match_dup 0) 0)
(truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
(zero_extend:DI (match_dup 2)))
(const_int 32))))])]
- "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
"")
(define_insn ""
@@ -3164,15 +3181,27 @@
"mulu%.l %2,%3:%0")
(define_expand "mulsidi3"
- [(parallel
- [(set (subreg:SI (match_operand:DI 0 "register_operand" "") 4)
- (mult:SI (match_operand:SI 1 "register_operand" "")
- (match_operand:SI 2 "register_operand" "")))
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" ""))))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "")
+
+(define_insn_and_split "*mulsidi3_split"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" ""))))]
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ "#"
+ "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
+ [(clobber (match_dup 0))
+ (parallel
+ [(set (subreg:SI (match_dup 0) 4)
+ (mult:SI (match_dup 1) (match_dup 2)))
(set (subreg:SI (match_dup 0) 0)
(truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
(sign_extend:DI (match_dup 2)))
(const_int 32))))])]
- "TARGET_68020 && !TUNE_68060 && !TARGET_COLDFIRE"
"")
(define_insn ""
diff -rNU3 dist.orig/gcc/config/m68k/netbsd-elf.h dist/gcc/config/m68k/netbsd-elf.h
--- dist.orig/gcc/config/m68k/netbsd-elf.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/m68k/netbsd-elf.h 2015-10-18 13:19:50.000000000 +0200
@@ -35,19 +35,34 @@
} \
while (0)
-/* Don't try using XFmode on the 68010. */
+/* Don't try using XFmode on the 68010 or coldfire. */
#undef LONG_DOUBLE_TYPE_SIZE
#define LONG_DOUBLE_TYPE_SIZE (TARGET_68020 ? 80 : 64)
#undef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
-#ifdef __mc68010__
+#if defined(__mc68010__) || defined(__mcoldfire__)
#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
#else
#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 80
#endif
+#undef SUBTARGET_OVERRIDE_OPTIONS
+#define SUBTARGET_OVERRIDE_OPTIONS \
+ { \
+ if (TARGET_COLDFIRE) \
+ { \
+ target_flags |= MASK_STRICT_ALIGNMENT | MASK_CF_HWDIV; \
+ if ((target_flags_explicit & MASK_HARD_FLOAT) == 0) \
+ { \
+ target_flags &= ~MASK_HARD_FLOAT; \
+ m68k_fpu = FPUTYPE_NONE; \
+ } \
+ } \
+ }
+
#undef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS \
+ { "netbsd_cpp_spec", NETBSD_CPP_SPEC }, \
{ "netbsd_entry_point", NETBSD_ENTRY_POINT },
@@ -56,20 +71,31 @@
whether or not use of the FPU is allowed. */
#undef CPP_SPEC
-#define CPP_SPEC NETBSD_CPP_SPEC
+#define CPP_SPEC \
+ "%(netbsd_cpp_spec)"
/* Provide an ASM_SPEC appropriate for NetBSD m68k ELF targets. We need
to pass PIC code generation options. */
#undef ASM_SPEC
-#define ASM_SPEC "%(asm_cpu_spec) %{fpic|fpie:-k} %{fPIC|fPIE:-k -K}"
+#define ASM_SPEC \
+ "%(asm_default_spec) \
+ %{m68010} %{m68020} %{m68030} %{m68040} %{m68060} \
+ %{m5200} %{m5206e} %{m528x} %{m5307} %{m5407} %{mcfv4e}\
+ %{mcpu=*:-mcpu=%*} %{march=*:-march=%*}\
+ %{fpic|fpie:-k} %{fPIC|fPIE:-k -K}"
/* Provide a LINK_SPEC appropriate for a NetBSD/m68k ELF target. */
#undef LINK_SPEC
#define LINK_SPEC NETBSD_LINK_SPEC_ELF
+/* NetBSD/sun2 does not support shlibs, avoid using libgcc_pic. */
+#if TARGET_DEFAULT_CPU == 0
+#undef REAL_LIBGCC_SPEC
+#endif
+
#define NETBSD_ENTRY_POINT "_start"
/* Output assembler code to FILE to increment profiler label # LABELNO
@@ -79,7 +105,13 @@
#define FUNCTION_PROFILER(FILE, LABELNO) \
do \
{ \
- asm_fprintf (FILE, "\tlea (%LLP%d,%Rpc),%Ra1\n", (LABELNO)); \
+ if (TARGET_COLDFIRE) \
+ { \
+ asm_fprintf (FILE, "\tmovea.l #%LLP%d-.,%Ra1\n", (LABELNO)); \
+ asm_fprintf (FILE, "\tlea (-6,%Rpc,%Ra1),%Ra1\n", (LABELNO)); \
+ } \
+ else \
+ asm_fprintf (FILE, "\tlea (%LLP%d,%Rpc),%Ra1\n", (LABELNO)); \
if (flag_pic) \
fprintf (FILE, "\tbsr.l __mcount@PLTPC\n"); \
else \
@@ -270,6 +302,8 @@
#undef STACK_BOUNDARY
#define STACK_BOUNDARY 32
+#undef PREFERRED_STACK_BOUNDARY
+#define PREFERRED_STACK_BOUNDARY 32
/* Alignment of field after `int : 0' in a structure.
diff -rNU3 dist.orig/gcc/config/m68k/t-m68010-netbsd dist/gcc/config/m68k/t-m68010-netbsd
--- dist.orig/gcc/config/m68k/t-m68010-netbsd 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/m68k/t-m68010-netbsd 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,4 @@
+# Use unwind-dw2-fde-glibc
+LIB2ADDEH = $(srcdir)/unwind-dw2.c $(srcdir)/unwind-dw2-fde-glibc.c \
+ $(srcdir)/unwind-sjlj.c $(srcdir)/gthr-gnat.c $(srcdir)/unwind-c.c
+LIB2ADDEHDEP = unwind.inc unwind-dw2-fde.h unwind-dw2-fde.c
diff -rNU3 dist.orig/gcc/config/m68k/t-opts dist/gcc/config/m68k/t-opts
--- dist.orig/gcc/config/m68k/t-opts 2011-05-02 17:42:39.000000000 +0200
+++ dist/gcc/config/m68k/t-opts 2015-10-18 13:19:50.000000000 +0200
@@ -1,5 +1,7 @@
$(srcdir)/config/m68k/m68k-tables.opt: $(srcdir)/config/m68k/genopt.sh \
$(srcdir)/config/m68k/m68k-devices.def $(srcdir)/config/m68k/m68k-isas.def \
$(srcdir)/config/m68k/m68k-microarchs.def
+ @echo "NOT REBUILDING $@"
+NetBSD_DISABLED_m68k-tables.opt:
$(SHELL) $(srcdir)/config/m68k/genopt.sh $(srcdir)/config/m68k > \
$(srcdir)/config/m68k/m68k-tables.opt
diff -rNU3 dist.orig/gcc/config/mips/netbsd.h dist/gcc/config/mips/netbsd.h
--- dist.orig/gcc/config/mips/netbsd.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/mips/netbsd.h 2015-10-18 13:19:50.000000000 +0200
@@ -32,16 +32,63 @@
if (TARGET_ABICALLS) \
builtin_define ("__ABICALLS__"); \
\
- if (mips_abi == ABI_EABI) \
- builtin_define ("__mips_eabi"); \
- else if (mips_abi == ABI_N32) \
+ /* The GNU C++ standard library requires this. */ \
+ if (c_dialect_cxx ()) \
+ builtin_define ("_GNU_SOURCE"); \
+ \
+ if (mips_abi == ABI_N32) \
+ { \
builtin_define ("__mips_n32"); \
+ builtin_define ("_ABIN32=2"); \
+ builtin_define ("_MIPS_SIM=_ABIN32"); \
+ builtin_define ("_MIPS_SZLONG=32"); \
+ builtin_define ("_MIPS_SZPTR=32"); \
+ } \
else if (mips_abi == ABI_64) \
+ { \
builtin_define ("__mips_n64"); \
+ builtin_define ("_ABI64=3"); \
+ builtin_define ("_MIPS_SIM=_ABI64"); \
+ builtin_define ("_MIPS_SZLONG=64"); \
+ builtin_define ("_MIPS_SZPTR=64"); \
+ } \
else if (mips_abi == ABI_O64) \
+ { \
builtin_define ("__mips_o64"); \
+ builtin_define ("_ABIO64=4"); \
+ builtin_define ("_MIPS_SIM=_ABIO64"); \
+ builtin_define ("_MIPS_SZLONG=64"); \
+ builtin_define ("_MIPS_SZPTR=64"); \
} \
- while (0)
+ else if (mips_abi == ABI_EABI) \
+ { \
+ builtin_define ("__mips_eabi"); \
+ builtin_define ("_ABIEMB=5"); \
+ builtin_define ("_MIPS_SIM=_ABIEMB"); \
+ if (TARGET_LONG64) \
+ builtin_define ("_MIPS_SZLONG=64"); \
+ else \
+ builtin_define ("_MIPS_SZLONG=32"); \
+ if (TARGET_64BIT) \
+ builtin_define ("_MIPS_SZPTR=64"); \
+ else \
+ builtin_define ("_MIPS_SZPTR=32"); \
+ } \
+ else \
+ { \
+ builtin_define ("__mips_o32"); \
+ builtin_define ("_ABIO32=1"); \
+ builtin_define ("_MIPS_SIM=_ABIO32"); \
+ builtin_define ("_MIPS_SZLONG=32"); \
+ builtin_define ("_MIPS_SZPTR=32"); \
+ } \
+ if (TARGET_FLOAT64) \
+ builtin_define ("_MIPS_FPSET=32"); \
+ else \
+ builtin_define ("_MIPS_FPSET=16"); \
+ \
+ builtin_define ("_MIPS_SZINT=32"); \
+ } while (0)
/* The generic MIPS TARGET_CPU_CPP_BUILTINS are incorrect for NetBSD.
Specifically, they define too many namespace-invasive macros. Override
@@ -97,6 +144,11 @@
builtin_define ("__mips=64"); \
builtin_define ("__mips_isa_rev=1"); \
} \
+ else if (ISA_MIPS64R2) \
+ { \
+ builtin_define ("__mips=64"); \
+ builtin_define ("__mips_isa_rev=2"); \
+ } \
\
if (TARGET_HARD_FLOAT) \
builtin_define ("__mips_hard_float"); \
@@ -111,6 +163,11 @@
else \
builtin_define ("__MIPSEL__"); \
\
+ if (TARGET_OCTEON) \
+ builtin_define ("__OCTEON__"); \
+ \
+ if (ISA_HAS_POP) \
+ builtin_define ("__mips_popcount"); \
/* No language dialect defines. */ \
\
/* ABIs handled in TARGET_OS_CPP_BUILTINS. */ \
@@ -136,10 +193,12 @@
#undef LINK_SPEC
#define LINK_SPEC \
- "%{EL:-m elf32lmip} \
- %{EB:-m elf32bmip} \
+ "%{EL:-m elf32ltsmip} \
+ %{EB:-m elf32btsmip} \
%(endian_spec) \
- %{G*} %{mips1} %{mips2} %{mips3} %{mips4} %{mips32} %{mips32r2} %{mips64} \
+ %{G*} %{mips1} %{mips2} %{mips3} %{mips4} \
+ %{mips32} %{mips32r2} %{mips64} %{mips64r2} \
+ %{bestGnum} %{call_shared} %{no_archive} %{exact_version} \
%(netbsd_link_spec)"
#define NETBSD_ENTRY_POINT "__start"
@@ -169,6 +228,20 @@
/* Make gcc agree with */
+#undef SIZE_TYPE
+#define SIZE_TYPE ((POINTER_SIZE == 64 || TARGET_NEWABI) \
+ ? "long unsigned int" : "unsigned int")
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE ((POINTER_SIZE == 64 || TARGET_NEWABI) \
+ ? "long int" : "int")
+
+#undef INTPTR_TYPE
+#define INTPTR_TYPE PTRDIFF_TYPE
+
+#undef UINTPTR_TYPE
+#define UINTPTR_TYPE SIZE_TYPE
+
#undef WCHAR_TYPE
#define WCHAR_TYPE "int"
@@ -177,3 +250,6 @@
#undef WINT_TYPE
#define WINT_TYPE "int"
+
+#undef TARGET_WRITABLE_EH_FRAME
+#define TARGET_WRITABLE_EH_FRAME (flag_pic && TARGET_SHARED)
diff -rNU3 dist.orig/gcc/config/mips/netbsd64.h dist/gcc/config/mips/netbsd64.h
--- dist.orig/gcc/config/mips/netbsd64.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/mips/netbsd64.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,47 @@
+/* Definitions of target machine for GNU compiler, for MIPS NetBSD systems.
+ Copyright (C) 1993, 1995, 1996, 1997, 1999, 2000, 2001, 2002, 2003, 2004
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* Force the default endianness and ABI flags onto the command line
+ in order to make the other specs easier to write. */
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+ BASE_DRIVER_SELF_SPECS \
+ "%{!EB:%{!EL:%(endian_spec)}}", \
+ "%{!mabi=*: -mabi=n32}"
+
+/* Define default target values. */
+
+/* Provide a LINK_SPEC appropriate for a NetBSD/mips target.
+ This is a copy of LINK_SPEC from <netbsd-elf.h> tweaked for
+ the MIPS target. */
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{mabi=64:-m elf64%{EB:b}%{EL:l}tsmip} \
+ %{mabi=32:-m elf32%{EB:b}%{EL:l}tsmip} \
+ %{mabi=o64:-m elf64%{EB:b}%{EL:l}tsmip} \
+ %{mabi=n32:-m elf32%{EB:b}%{EL:l}tsmipn32} \
+ %(endian_spec) \
+ %{G*} %{mips1} %{mips2} %{mips3} %{mips4} \
+ %{mips32} %{mips32r2} %{mips64} %{mips64r2} \
+ %{bestGnum} %{call_shared} %{no_archive} %{exact_version} \
+ %(netbsd_link_spec)"
diff -rNU3 dist.orig/gcc/config/mips/t-mips dist/gcc/config/mips/t-mips
--- dist.orig/gcc/config/mips/t-mips 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/mips/t-mips 2015-10-18 13:19:50.000000000 +0200
@@ -18,5 +18,7 @@
$(srcdir)/config/mips/mips-tables.opt: $(srcdir)/config/mips/genopt.sh \
$(srcdir)/config/mips/mips-cpus.def
+ @echo "NOT REBUILDING $@"
+NetBSD_DISABLED_config_mips_mips-tables.opt:
$(SHELL) $(srcdir)/config/mips/genopt.sh $(srcdir)/config/mips > \
$(srcdir)/config/mips/mips-tables.opt
diff -rNU3 dist.orig/gcc/config/mips/t-netbsd64 dist/gcc/config/mips/t-netbsd64
--- dist.orig/gcc/config/mips/t-netbsd64 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/mips/t-netbsd64 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,9 @@
+# NetBSD has (will have) "non-native" libraries in /usr/lib/.
+# For NetBSD/mips64 we thus have /usr/lib (n32), /usr/lib/o32 and /usr/lib/64.
+
+MULTILIB_OPTIONS = mabi=n32/mabi=64/mabi=32
+MULTILIB_DIRNAMES = n32 n64 o32
+MULTILIB_OSDIRNAMES = . ../lib/64 ../lib/o32
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff -rNU3 dist.orig/gcc/config/netbsd-elf.h dist/gcc/config/netbsd-elf.h
--- dist.orig/gcc/config/netbsd-elf.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/netbsd-elf.h 2015-10-18 13:19:50.000000000 +0200
@@ -40,8 +40,11 @@
%{!p:crt0%O%s}}} \
%:if-exists(crti%O%s) \
%{static:%:if-exists-else(crtbeginT%O%s crtbegin%O%s)} \
- %{!static: \
- %{!shared:crtbegin%O%s} %{shared:crtbeginS%O%s}}"
+ %{!static: \
+ %{!shared: \
+ %{!pie:crtbegin%O%s} \
+ %{pie:crtbeginS%O%s}} \
+ %{shared:crtbeginS%O%s}}"
#undef STARTFILE_SPEC
#define STARTFILE_SPEC NETBSD_STARTFILE_SPEC
@@ -52,7 +55,10 @@
C++ file-scope static objects deconstructed after exiting "main". */
#define NETBSD_ENDFILE_SPEC \
- "%{!shared:crtend%O%s} %{shared:crtendS%O%s} \
+ "%{!shared: \
+ %{!pie:crtend%O%s} \
+ %{pie:crtendS%O%s}} \
+ %{shared:crtendS%O%s} \
%:if-exists(crtn%O%s)"
#undef ENDFILE_SPEC
@@ -70,6 +76,7 @@
#define NETBSD_LINK_SPEC_ELF \
"%{assert*} %{R*} %{rpath*} \
%{shared:-shared} \
+ %{symbolic:-Bsymbolic} \
%{!shared: \
-dc -dp \
%{!nostdlib: \
@@ -84,3 +91,11 @@
#ifdef HAVE_LD_AS_NEEDED
#define USE_LD_AS_NEEDED 1
#endif
+
+#define MFLIB_SPEC " %{fmudflap: -export-dynamic -lmudflap \
+ %{static:%(link_gcc_c_sequence) -lmudflap}} \
+ %{fmudflapth: -export-dynamic -lmudflapth -lpthread \
+ %{static:%(link_gcc_c_sequence) -lmudflapth}} "
+
+#undef TARGET_UNWIND_TABLES_DEFAULT
+#define TARGET_UNWIND_TABLES_DEFAULT true
diff -rNU3 dist.orig/gcc/config/netbsd-stdint.h dist/gcc/config/netbsd-stdint.h
--- dist.orig/gcc/config/netbsd-stdint.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/netbsd-stdint.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,72 @@
+/* Definitions for types for NetBSD systems.
+ Copyright (C) 2009 Free Software Foundation, Inc.
+ Contributed by Gerald Pfeifer <gerald@pfeifer.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#define SIG_ATOMIC_TYPE "int"
+
+#define INT8_TYPE "signed char"
+#define INT16_TYPE "short int"
+#define INT32_TYPE "int"
+#define INT64_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : "long long int")
+#define UINT8_TYPE "unsigned char"
+#define UINT16_TYPE "short unsigned int"
+#define UINT32_TYPE "unsigned int"
+#define UINT64_TYPE (LONG_TYPE_SIZE == 64 ? "long unsigned int" : "long long unsigned int")
+
+#define INT_LEAST8_TYPE "signed char"
+#define INT_LEAST16_TYPE "short int"
+#define INT_LEAST32_TYPE "int"
+#define INT_LEAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : "long long int")
+#define UINT_LEAST8_TYPE "unsigned char"
+#define UINT_LEAST16_TYPE "short unsigned int"
+#define UINT_LEAST32_TYPE "unsigned int"
+#define UINT_LEAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long unsigned int" : "long long unsigned int")
+
+#ifdef CHAR_FAST8
+#define INT_FAST8_TYPE (LONG_TYPE_SIZE == 64 ? "int" : "signed char")
+#else
+#define INT_FAST8_TYPE "int"
+#endif
+#ifdef SHORT_FAST16
+#define INT_FAST16_TYPE (LONG_TYPE_SIZE == 64 ? "int" : "short int")
+#else
+#define INT_FAST16_TYPE "int"
+#endif
+#define INT_FAST32_TYPE "int"
+#define INT_FAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : "long long int")
+#ifdef CHAR_FAST8
+#define UINT_FAST8_TYPE (LONG_TYPE_SIZE == 64 ? "unsigned int" : "unsigned char")
+#else
+#define UINT_FAST8_TYPE "unsigned int"
+#endif
+#ifdef SHORT_FAST16
+#define UINT_FAST16_TYPE (LONG_TYPE_SIZE == 64 ? "unsigned int" : "short unsigned int")
+#else
+#define UINT_FAST16_TYPE "unsigned int"
+#endif
+#define UINT_FAST32_TYPE "unsigned int"
+#define UINT_FAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long unsigned int" : "long long unsigned int")
+
+#define INTPTR_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : "int")
+#define UINTPTR_TYPE (LONG_TYPE_SIZE == 64 ? "long unsigned int" : "unsigned int")
diff -rNU3 dist.orig/gcc/config/netbsd.h dist/gcc/config/netbsd.h
--- dist.orig/gcc/config/netbsd.h 2013-01-10 21:38:27.000000000 +0100
+++ dist/gcc/config/netbsd.h 2015-10-18 13:19:50.000000000 +0200
@@ -36,37 +36,59 @@
/* NETBSD_NATIVE is defined when gcc is integrated into the NetBSD
source tree so it can be configured appropriately without using
- the GNU configure/build mechanism. */
+ the GNU configure/build mechanism.
-#ifdef NETBSD_NATIVE
+ NETBSD_TOOLS is defined when gcc is built as cross-compiler for
+ the in-tree toolchain.
+ */
+
+#if defined(NETBSD_NATIVE) || defined(NETBSD_TOOLS)
/* Look for the include files in the system-defined places. */
#undef GPLUSPLUS_INCLUDE_DIR
#define GPLUSPLUS_INCLUDE_DIR "/usr/include/g++"
+#undef GPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT
+#define GPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT 1
+
+#undef GPLUSPLUS_BACKWARD_INCLUDE_DIR
+#define GPLUSPLUS_BACKWARD_INCLUDE_DIR "/usr/include/g++/backward"
+
+#undef GCC_INCLUDE_DIR_ADD_SYSROOT
+#define GCC_INCLUDE_DIR_ADD_SYSROOT 1
+
+/*
+ * XXX figure out a better way to do this
+ */
#undef GCC_INCLUDE_DIR
-#define GCC_INCLUDE_DIR "/usr/include"
+#define GCC_INCLUDE_DIR "/usr/include/gcc-4.8"
-#undef INCLUDE_DEFAULTS
-#define INCLUDE_DEFAULTS \
- { \
- { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1 }, \
- { GCC_INCLUDE_DIR, "GCC", 0, 0 }, \
- { 0, 0, 0, 0 } \
- }
+/* Under NetBSD, the normal location of the various *crt*.o files is the
+ /usr/lib directory. */
+#undef STANDARD_STARTFILE_PREFIX
+#define STANDARD_STARTFILE_PREFIX "/usr/lib/"
+#undef STANDARD_STARTFILE_PREFIX_1
+#define STANDARD_STARTFILE_PREFIX_1 "/usr/lib/"
+
+#endif /* NETBSD_NATIVE || NETBSD_TOOLS */
+
+#if defined(NETBSD_NATIVE)
/* Under NetBSD, the normal location of the compiler back ends is the
/usr/libexec directory. */
#undef STANDARD_EXEC_PREFIX
#define STANDARD_EXEC_PREFIX "/usr/libexec/"
-/* Under NetBSD, the normal location of the various *crt*.o files is the
- /usr/lib directory. */
+#undef TOOLDIR_BASE_PREFIX
+#define TOOLDIR_BASE_PREFIX "../"
-#undef STANDARD_STARTFILE_PREFIX
-#define STANDARD_STARTFILE_PREFIX "/usr/lib/"
+#undef STANDARD_BINDIR_PREFIX
+#define STANDARD_BINDIR_PREFIX "/usr/bin"
+
+#undef STANDARD_LIBEXEC_PREFIX
+#define STANDARD_LIBEXEC_PREFIX STANDARD_EXEC_PREFIX
#endif /* NETBSD_NATIVE */
@@ -96,6 +118,7 @@
%{!pg:-lposix}} \
%{p:-lposix_p} \
%{pg:-lposix_p}} \
+ %{shared:-lc} \
%{!shared: \
%{!symbolic: \
%{!p: \
@@ -109,6 +132,7 @@
%{!pg:-lposix}} \
%{p:-lposix_p} \
%{pg:-lposix_p}} \
+ %{shared:-lc} \
%{!shared: \
%{!symbolic: \
%{!p: \
@@ -120,24 +144,18 @@
#undef LIB_SPEC
#define LIB_SPEC NETBSD_LIB_SPEC
-/* Provide a LIBGCC_SPEC appropriate for NetBSD. We also want to exclude
- libgcc with -symbolic. */
+#undef STATIC_LIBASAN_LIBS
+#define STATIC_LIBASAN_LIBS "-lstdc++ -lpthread"
-#ifdef NETBSD_NATIVE
-#define NETBSD_LIBGCC_SPEC \
- "%{!symbolic: \
- %{!shared: \
- %{!p: \
- %{!pg: -lgcc}}} \
- %{shared: -lgcc_pic} \
- %{p: -lgcc_p} \
- %{pg: -lgcc_p}}"
-#else
-#define NETBSD_LIBGCC_SPEC "%{!shared:%{!symbolic: -lgcc}}"
-#endif
+/* Pass -cxx-isystem to cc1/cc1plus. */
+#define NETBSD_CC1_AND_CC1PLUS_SPEC \
+ "%{cxx-isystem}"
+
+#undef CC1_SPEC
+#define CC1_SPEC NETBSD_CC1_AND_CC1PLUS_SPEC
-#undef LIBGCC_SPEC
-#define LIBGCC_SPEC NETBSD_LIBGCC_SPEC
+#undef CC1PLUS_SPEC
+#define CC1PLUS_SPEC NETBSD_CC1_AND_CC1PLUS_SPEC
/* When building shared libraries, the initialization and finalization
functions for the library are .init and .fini respectively. */
@@ -172,3 +190,10 @@
#undef WINT_TYPE
#define WINT_TYPE "int"
+
+#define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
+
+/* Use --as-needed -lgcc_s for eh support. */
+#ifdef HAVE_LD_AS_NEEDED
+#define USE_LD_AS_NEEDED 1
+#endif
diff -rNU3 dist.orig/gcc/config/or1k/constraints.md dist/gcc/config/or1k/constraints.md
--- dist.orig/gcc/config/or1k/constraints.md 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/constraints.md 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,59 @@
+;; Copyright (C) 2010 Embecosm Limited
+;;
+;; Contributed by Joern Rennecke in 2010
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; .
+
+(define_constraint "I"
+ ""
+ (and (match_code "const_int")
+ (match_test "ival >= -32768 && ival <= 32767")))
+
+(define_constraint "J"
+ ""
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "K"
+ ""
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 65535")))
+
+(define_constraint "L"
+ ""
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 31")))
+
+(define_constraint "M"
+ ""
+ (and (match_code "const_int")
+ (match_test "(ival & 0xffff) == 0")))
+
+(define_constraint "N"
+ ""
+ (and (match_code "const_int")
+ (match_test "ival >= -33554432 && ival <= 33554431")))
+
+(define_constraint "O"
+ ""
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "C"
+ ""
+ (match_code "const_double"))
+
diff -rNU3 dist.orig/gcc/config/or1k/elf.h dist/gcc/config/or1k/elf.h
--- dist.orig/gcc/config/or1k/elf.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/elf.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,31 @@
+/* Definitions for rtems targeting an OpenRisc OR1K using COFF
+ ??? this is for OR1K, but the rest of the above seems bogus.
+ Copyright (C) 1996, 1997, 2005 Free Software Foundation, Inc.
+ Contributed by Joel Sherrill (joel@OARcorp.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Use ELF */
+#undef OBJECT_FORMAT_ELF
+#define OBJECT_FORMAT_ELF
+
+/* or1k debug info support is controlled by tm.h header files we include:
+ dbxelf.h enables optional stabs debug info.
+ elfos.h sets PREFERRED_DEBUGGING_TYPE to DWARF2_DEBUG . */
+
+#define DRIVER_SELF_SPECS "%{!mno-newlib:-mnewlib}"
diff -rNU3 dist.orig/gcc/config/or1k/linux-elf.h dist/gcc/config/or1k/linux-elf.h
--- dist.orig/gcc/config/or1k/linux-elf.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/linux-elf.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,98 @@
+/* Definitions for or1k running Linux-based GNU systems using ELF
+ Copyright (C) 2002, 2005
+ Free Software Foundation, Inc.
+ Contributed by Marko Mlinar
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* elfos.h should have already been included. Now just override
+ any conflicting definitions and add any extras. */
+
+/* Do not assume anything about header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+/* This is how we tell the assembler that two symbols have the same value. */
+#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
+ do \
+ { \
+ assemble_name (FILE, NAME1); \
+ fputs (" = ", FILE); \
+ assemble_name (FILE, NAME2); \
+ fputc ('\n', FILE); \
+ } \
+ while (0)
+
+
+#if 0
+/* Node: Label Output */
+
+#define SET_ASM_OP "\t.set\t"
+
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \
+ (*targetm.asm_out.globalize_label) (FILE, XSTR (FUN, 0))
+
+#define ASM_WEAKEN_LABEL(FILE, NAME) \
+ do \
+ { \
+ fputs ("\t.weak\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputc ('\n', (FILE)); \
+ } \
+ while (0)
+
+#endif
+
+/* The GNU C++ standard library requires that these macros be defined. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS ""
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
+
+/* Define a set of Linux builtins. This is copied from linux.h. We can't
+ include the whole file for now, because that causes configure to require ld
+ to support --eh-frame-header, which it currently doesn't */
+#define LINUX_TARGET_OS_CPP_BUILTINS() \
+ do { \
+ builtin_define ("__gnu_linux__"); \
+ builtin_define_std ("linux"); \
+ builtin_define_std ("unix"); \
+ builtin_assert ("system=linux"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=posix"); \
+ } while (0)
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ LINUX_TARGET_OS_CPP_BUILTINS(); \
+ if (OPTION_UCLIBC) \
+ builtin_define ("__UCLIBC__"); \
+ /* The GNU C++ standard library requires this. */ \
+ if (c_dialect_cxx ()) \
+ builtin_define ("_GNU_SOURCE"); \
+ } while (0)
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{mnewlib:-entry 0x100} \
+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER " \
+ %{rdynamic:-export-dynamic} \
+ %{static:-static} \
+ %{shared:-shared}"
+
diff -rNU3 dist.orig/gcc/config/or1k/linux-gas.h dist/gcc/config/or1k/linux-gas.h
--- dist.orig/gcc/config/or1k/linux-gas.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/linux-gas.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,37 @@
+/* Definitions of target machine for GNU compiler.
+ Or32 Linux-based GNU systems version.
+ Copyright (C) 2002, 2005 Free Software Foundation, Inc.
+ Contributed by Marko Mlinar
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Unsigned chars produces much better code than signed. */
+#undef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 1
+
+/* Make gcc agree with */
+
+#define SIZE_TYPE "unsigned int"
+#define PTRDIFF_TYPE "int"
+#define WCHAR_TYPE "unsigned int"
+#define WCHAR_TYPE_SIZE 32
+
+
+/* Clear the instruction cache from `beg' to `end'. This makes an
+ inline system call to SYS_cacheflush. */
+#define CLEAR_INSN_CACHE(BEG, END) /* Do something here !!! */
diff -rNU3 dist.orig/gcc/config/or1k/netbsd.h dist/gcc/config/or1k/netbsd.h
--- dist.orig/gcc/config/or1k/netbsd.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/netbsd.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,67 @@
+/* Definitions for or1k running NetBSD systems using ELF
+ Copyright (C) 2014
+ Free Software Foundation, Inc.
+ Contributed by Matt Thomas
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This is how we tell the assembler that two symbols have the same value. */
+#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
+ do \
+ { \
+ assemble_name (FILE, NAME1); \
+ fputs (" = ", FILE); \
+ assemble_name (FILE, NAME2); \
+ fputc ('\n', FILE); \
+ } \
+ while (0)
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS ""
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ NETBSD_OS_CPP_BUILTINS_ELF(); \
+ /* The GNU C++ standard library requires this. */ \
+ if (c_dialect_cxx ()) \
+ builtin_define ("_GNU_SOURCE"); \
+ } while (0)
+
+#undef CPP_SPEC
+#define CPP_SPEC NETBSD_CPP_SPEC
+
+#undef LIB_SPEC
+#define LIB_SPEC NETBSD_LIB_SPEC
+
+#undef LINK_SPEC
+#define LINK_SPEC NETBSD_LINK_SPEC_ELF
+
+#undef NETBSD_ENTRY_POINT
+#define NETBSD_ENTRY_POINT "_start"
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "netbsd_link_spec", NETBSD_LINK_SPEC_ELF }, \
+ { "netbsd_entry_point", NETBSD_ENTRY_POINT }, \
+ { "netbsd_endfile_spec", NETBSD_ENDFILE_SPEC },
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (/*MASK_HARD_FLOAT |*/ MASK_DOUBLE_FLOAT \
+ | MASK_HARD_DIV | MASK_HARD_MUL \
+ | MASK_MASK_CMOV | MASK_MASK_ROR | MASK_MASK_SEXT)
diff -rNU3 dist.orig/gcc/config/or1k/or1k-modes.def dist/gcc/config/or1k/or1k-modes.def
--- dist.orig/gcc/config/or1k/or1k-modes.def 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/or1k-modes.def 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,38 @@
+/* Definitions of target machine for GNU compiler, for OR32.
+ Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 2, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to the
+ Free Software Foundation, 59 Temple Place - Suite 330, Boston,
+ MA 02111-1307, USA. */
+
+/* Add any extra modes needed to represent the condition code.
+ */
+
+CC_MODE (CCEQ);
+CC_MODE (CCNE);
+
+CC_MODE (CCLE);
+CC_MODE (CCGE);
+CC_MODE (CCLT);
+CC_MODE (CCGT);
+
+CC_MODE (CCLEU);
+CC_MODE (CCGEU);
+CC_MODE (CCLTU);
+CC_MODE (CCGTU);
+
+CC_MODE(CCFP);
+CC_MODE(CCUNS);
diff -rNU3 dist.orig/gcc/config/or1k/or1k-opts.h dist/gcc/config/or1k/or1k-opts.h
--- dist.orig/gcc/config/or1k/or1k-opts.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/or1k-opts.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,14 @@
+#ifndef OR1K_OPTS_H
+#define OR1K_OPTS_H
+
+enum or1k_delay {
+ OR1K_DELAY_OFF = 0,
+ OR1K_DELAY_ON = 1,
+ OR1K_DELAY_COMPAT = 2
+};
+
+#define TARGET_DELAY_ON (or1k_delay_selected == OR1K_DELAY_ON)
+#define TARGET_DELAY_OFF (or1k_delay_selected == OR1K_DELAY_OFF)
+#define TARGET_DELAY_COMPAT (or1k_delay_selected == OR1K_DELAY_COMPAT)
+
+#endif
diff -rNU3 dist.orig/gcc/config/or1k/or1k-protos.h dist/gcc/config/or1k/or1k-protos.h
--- dist.orig/gcc/config/or1k/or1k-protos.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/or1k-protos.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,67 @@
+/* Definitions of target machine for GNU compiler, OR1K cpu.
+
+ Copyright (C) 2010 Embecosm Limited
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+. */
+
+#ifndef GCC_OR1K_PROTOS_H
+#define GCC_OR1K_PROTOS_H
+
+/* The following are for general support. */
+extern int or1k_trampoline_code_size (void);
+
+/* The following are only needed when handling the machine definition. */
+#ifdef RTX_CODE
+extern void or1k_init_expanders (void);
+extern void or1k_expand_prologue (void);
+extern void or1k_expand_epilogue (void);
+extern bool or1k_expand_move (enum machine_mode mode, rtx operands[]);
+extern const char *or1k_output_move_double (rtx *operands);
+extern void or1k_expand_conditional_branch (rtx *operands,
+ enum machine_mode mode);
+extern int or1k_emit_cmove (rtx dest,
+ rtx op,
+ rtx true_cond,
+ rtx false_cond);
+extern enum machine_mode or1k_select_cc_mode (enum rtx_code op);
+extern const char *or1k_output_bf (rtx * operands);
+extern const char *or1k_output_cmov (rtx * operands);
+extern void or1k_emit_set_const32 (rtx op0,
+ rtx op1);
+extern bool or1k_expand_symbol_ref (enum machine_mode mode,
+ rtx operands[]);
+extern void or1k_expand_cmpxchg_qihi (rtx bval, rtx retval,
+ rtx mem, rtx oldval, rtx newval, int is_weak,
+ enum memmodel success_mode, enum memmodel failure_mode);
+extern void or1k_expand_fetch_op_qihi (rtx oldval, rtx mem, rtx operand,
+ rtx newval, rtx (*generator)(rtx, rtx, rtx, rtx, rtx));
+#endif
+
+#endif
+extern int or1k_struct_alignment (tree);
+extern int or1k_data_alignment (tree, int);
+
+extern int or1k_initial_elimination_offset (int, int);
+extern bool or1k_save_reg_p_cached (int regno);
+extern void or1k_print_jump_restore (rtx jump_address);
+extern rtx or1k_eh_return_handler_rtx (void);
+extern rtx or1k_return_addr_rtx (int, rtx);
+
+extern int or1k_legitimate_pic_operand_p (rtx x);
+
+/* For RETURN_ADDR_RTX */
+extern rtx get_hard_reg_initial_val (enum machine_mode, unsigned int);
diff -rNU3 dist.orig/gcc/config/or1k/or1k.c dist/gcc/config/or1k/or1k.c
--- dist.orig/gcc/config/or1k/or1k.c 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/or1k.c 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,2433 @@
+/* Subroutines for insn-output.c for GNU compiler. OpenRISC 1000 version.
+ Copyright (C) 1987, 1992, 1997, 1999, 2000, 2001, 2002, 2003, 2004,
+ 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc
+ Copyright (C) 2010 Embecosm Limited
+
+ Contributed by Damjan Lampret in 1999.
+ Major optimizations by Matjaz Breskvar in 2005.
+ Updated for GCC 4.5 by Jeremy Bennett
+ and Joern Rennecke in 2010.
+
+ This file is part of GNU CC.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program. If not, see . */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+//#include "calls.h"
+//#include "varasm.h"
+//#include "obstack.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "reload.h"
+#include "function.h"
+#include "expr.h"
+#include "toplev.h"
+#include "recog.h"
+#include "ggc.h"
+#include "except.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "debug.h"
+#include "langhooks.h"
+#include "df.h"
+#include "dwarf2.h"
+#include "ansidecl.h"
+
+/* ========================================================================== */
+/* Local macros */
+
+/* Construct a l.movhi instruction for the given reg and value */
+#define OR1K_MOVHI(rd, k) \
+ ((0x6 << 26) | ((rd) << 21) | (k))
+
+/* Construct a l.ori instruction for the given two regs and value */
+#define OR1K_ORI(rd, ra, k) \
+ ((0x2a << 26) | ((rd) << 21) | ((ra) << 16) | (k))
+
+/* Construct a l.lwz instruction for the given two registers and offset */
+#define OR1K_LWZ(rd, ra, i) \
+ ((0x21 << 26) | ((rd) << 21) | ((ra) << 16) | (i))
+
+/* Construct a l.jr instruction for the given register */
+#define OR1K_JR(rb) \
+ ((0x11 << 26) | ((rb) << 11))
+
+#define OR1K_NOP \
+ (0x15 << 24)
+
+/* ========================================================================== */
+/* Static variables (i.e. global to this file only. */
+
+
+/*!Stack layout we use for pushing and poping saved registers */
+static struct
+{
+ bool save_lr_p;
+ int lr_save_offset;
+ bool save_fp_p;
+ int fp_save_offset;
+ int gpr_size;
+ int gpr_offset;
+ int total_size;
+ int vars_size;
+ int args_size;
+ int gpr_frame;
+ int late_frame;
+ HOST_WIDE_INT mask;
+} frame_info;
+
+
+/* ========================================================================== */
+/* Local (i.e. static) utility functions */
+
+/* -------------------------------------------------------------------------- */
+/*!Must the current function save a register?
+
+ @param[in] regno The register to consider.
+
+ @return Non-zero (TRUE) if current function must save "regno", zero
+ (FALSE) otherwise. */
+/* -------------------------------------------------------------------------- */
+static bool
+or1k_save_reg_p (int regno)
+{
+ /* No need to save the faked cc0 register. */
+ if (regno == OR1K_FLAGS_REG)
+ return false;
+
+ /* Check call-saved registers. */
+ if (df_regs_ever_live_p(regno) && !call_used_regs[regno])
+ return true;
+
+ /* We need to save the old frame pointer before setting up a new
+ one. */
+ if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
+ return true;
+
+ /* Save the stack pointer for DWARF2 for now.
+ * AFAIK, DWARF should be able to unwind using only the current stack
+ * register and the CFA offset, but I never got that to work. */
+ if (regno == STACK_POINTER_REGNUM && !frame_pointer_needed)
+ return true;
+
+ /* We need to save the incoming return address if it is ever clobbered
+ within the function. */
+ if (regno == LINK_REGNUM
+ && (df_regs_ever_live_p(regno) || crtl->uses_pic_offset_table
+ || cfun->machine->force_lr_save))
+ return true;
+
+ if(crtl->calls_eh_return)
+ {
+ unsigned int i;
+ for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
+ {
+ if ((unsigned int)regno == EH_RETURN_DATA_REGNO (i))
+ return true;
+ }
+ }
+
+ return false;
+
+} /* or1k_save_reg_p () */
+
+bool
+or1k_save_reg_p_cached (int regno)
+{
+ return (frame_info.mask & ((HOST_WIDE_INT) 1 << regno)) != 0;
+}
+
+/* N.B. contrary to the ISA documentation, the stack includes the outgoing
+ arguments. */
+/* -------------------------------------------------------------------------- */
+/*!Compute full frame size and layout.
+
+ Store information in "frame_info".
+
+ @param[in] size The size of the function's local variables.
+
+ @return Total size of stack frame. */
+/* -------------------------------------------------------------------------- */
+static HOST_WIDE_INT
+or1k_compute_frame_size (HOST_WIDE_INT size)
+{
+ HOST_WIDE_INT args_size;
+ HOST_WIDE_INT vars_size;
+ HOST_WIDE_INT stack_offset;
+ HOST_WIDE_INT save_size;
+ bool interrupt_p = false;
+ int regno;
+
+ args_size = crtl->outgoing_args_size;
+ vars_size = OR1K_ALIGN (size, 4);
+
+ frame_info.args_size = args_size;
+ frame_info.vars_size = vars_size;
+ frame_info.gpr_frame = interrupt_p ? or1k_redzone : 0;
+
+ /* If the function has local variables, we're committed to
+ allocating it anyway. Otherwise reclaim it here. */
+ /* FIXME: Verify this. Got if from the MIPS port. */
+ if (vars_size == 0 && crtl->is_leaf)
+ args_size = 0;
+
+ stack_offset = 0;
+
+ /* Save link register right at the bottom. */
+ if (or1k_save_reg_p (LINK_REGNUM))
+ {
+ stack_offset = stack_offset - UNITS_PER_WORD;
+ frame_info.lr_save_offset = stack_offset;
+ frame_info.save_lr_p = true;
+ }
+ else
+ frame_info.save_lr_p = false;
+
+ /* HACK: In PIC mode we need to save the PIC reg and the link reg in
+ in case the function is doing references through the got or plt,
+ but this information is not necessarily available when the initial
+ elimination offset is calculated, so we always reserve the space even
+ if it is not used... */
+ if (!frame_info.save_lr_p && flag_pic)
+ stack_offset = stack_offset - UNITS_PER_WORD;
+
+ /* Save frame pointer right after possible link register. */
+ if (frame_pointer_needed)
+ {
+ stack_offset = stack_offset - UNITS_PER_WORD;
+ frame_info.fp_save_offset = stack_offset;
+ frame_info.save_fp_p = true;
+ }
+ else
+ frame_info.save_fp_p = false;
+
+ frame_info.gpr_size = 0;
+ frame_info.mask = 0;
+
+ for (regno = 0; regno <= OR1K_LAST_ACTUAL_REG; regno++)
+ {
+ if (regno == LINK_REGNUM
+ || (frame_pointer_needed && regno == HARD_FRAME_POINTER_REGNUM))
+ /* These have already been saved if so needed. */
+ continue;
+
+ if (or1k_save_reg_p (regno))
+ {
+ frame_info.gpr_size += UNITS_PER_WORD;
+ frame_info.mask |= ((HOST_WIDE_INT) 1 << regno);
+ }
+ }
+
+ if (!or1k_save_reg_p (PIC_OFFSET_TABLE_REGNUM)
+ && (crtl->uses_pic_offset_table || (flag_pic && frame_info.save_lr_p)))
+ {
+ frame_info.gpr_size += UNITS_PER_WORD;
+ frame_info.mask |= ((HOST_WIDE_INT) 1 << PIC_OFFSET_TABLE_REGNUM);
+ }
+ else if (flag_pic && !or1k_save_reg_p (PIC_OFFSET_TABLE_REGNUM))
+ frame_info.gpr_size += UNITS_PER_WORD;
+
+ save_size = (frame_info.gpr_size
+ + (frame_info.save_fp_p ? UNITS_PER_WORD : 0)
+ + (frame_info.save_lr_p || flag_pic ? UNITS_PER_WORD : 0));
+ frame_info.total_size = save_size + vars_size + args_size;
+ gcc_assert (PROLOGUE_TMP != STATIC_CHAIN_REGNUM);
+ if (frame_info.total_size > 32767 && interrupt_p)
+ {
+ int n_extra
+ = (!!(~frame_info.mask && 1 << PROLOGUE_TMP)
+ + !!(~frame_info.mask & 1 << EPILOGUE_TMP)) * UNITS_PER_WORD;
+
+ save_size += n_extra;
+ frame_info.gpr_size += n_extra;
+ frame_info.total_size += n_extra;
+ frame_info.mask |= (1 << PROLOGUE_TMP) | (1 << EPILOGUE_TMP);
+ }
+
+ stack_offset -= frame_info.gpr_size;
+ frame_info.gpr_offset = stack_offset;
+ frame_info.late_frame = frame_info.total_size;
+
+ if (save_size > or1k_redzone
+ || (frame_info.gpr_frame
+ && (frame_info.gpr_frame + frame_info.late_frame <= 32767)))
+ {
+ if (frame_info.gpr_frame + frame_info.late_frame <= 32767)
+ save_size = frame_info.total_size;
+ frame_info.gpr_frame += save_size;
+ frame_info.lr_save_offset += save_size;
+ frame_info.fp_save_offset += save_size;
+ frame_info.gpr_offset += save_size;
+ frame_info.late_frame -= save_size;
+ /* FIXME: check in TARGET_OVERRIDE_OPTIONS for invalid or1k_redzone. */
+ gcc_assert (frame_info.gpr_frame <= 32767);
+ gcc_assert ((frame_info.gpr_frame & 3) == 0);
+ }
+
+ return frame_info.total_size;
+
+} /* or1k_compute_frame_size () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Emit a frame related insn.
+
+ Same as emit_insn, but sets RTX_FRAME_RELATED_P to one. Getting this right
+ will matter for DWARF 2 output, if prologues are handled via the "prologue"
+ pattern rather than target hooks.
+
+ @param[in] insn The insn to emit.
+
+ @return The RTX for the emitted insn. */
+/* -------------------------------------------------------------------------- */
+static rtx
+emit_frame_insn (rtx insn)
+{
+ insn = emit_insn (insn);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ return (insn);
+
+} /* emit_frame_insn () */
+
+
+/* -------------------------------------------------------------------------- */
+/* Generate a RTX for the indexed memory address based on stack_pointer_rtx
+ and a displacement
+
+ @param[in] disp The displacement
+
+ @return The RTX for the generated address. */
+/* -------------------------------------------------------------------------- */
+static rtx
+stack_disp_mem (HOST_WIDE_INT disp)
+{
+ return gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx, disp));
+}
+
+enum machine_mode
+or1k_select_cc_mode (enum rtx_code op)
+{
+ switch (op) {
+ case EQ: return CCEQmode;
+ case NE: return CCNEmode;
+ case GEU: return CCGEUmode;
+ case GTU: return CCGTUmode;
+ case LTU: return CCLTUmode;
+ case LEU: return CCLEUmode;
+ case GE: return CCGEmode;
+ case LT: return CCLTmode;
+ case GT: return CCGTmode;
+ case LE: return CCLEmode;
+ default: gcc_unreachable ();
+ }
+}
+
+/* -------------------------------------------------------------------------- */
+/*!Generate insn patterns to do an integer compare of operands.
+
+ @param[in] code RTX for the condition code.
+ @param[in] op0 RTX for the first operand.
+ @param[in] op1 RTX for the second operand.
+
+ @return RTX for the comparison. */
+/* -------------------------------------------------------------------------- */
+static rtx
+or1k_expand_int_compare (enum rtx_code code,
+ rtx op0,
+ rtx op1)
+{
+ enum machine_mode cmpmode;
+ rtx tmp, flags;
+
+ cmpmode = or1k_select_cc_mode (code);
+ flags = gen_rtx_REG (cmpmode, OR1K_FLAGS_REG);
+
+ /* This is very simple, but making the interface the same as in the
+ FP case makes the rest of the code easier. */
+ tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
+ emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
+
+ /* Return the test that should be put into the flags user, i.e.
+ the bcc, scc, or cmov instruction. */
+ return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
+
+} /* or1k_expand_int_compare () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Generate insn patterns to do an integer compare of operands.
+
+ We only deal with the case where the comparison is an integer
+ comparison. This wrapper function potentially allows reuse for non-integer
+ comparison in the future.
+
+ @param[in] code RTX for the condition code.
+ @param[in] op0 RTX for the first operand.
+ @param[in] op1 RTX for the second operand.
+
+ @return RTX for the comparison. */
+/* -------------------------------------------------------------------------- */
+static rtx
+or1k_expand_compare (enum rtx_code code, rtx op0, rtx op1)
+{
+ return or1k_expand_int_compare (code, op0, op1);
+
+} /* or1k_expand_compare () */
+
+
+/* TODO(bluecmd): Write documentation for this function */
+void
+or1k_expand_cmpxchg_qihi (rtx bval, rtx retval, rtx mem, rtx oldval, rtx newval,
+ int is_weak, enum memmodel success_mode,
+ enum memmodel failure_mode)
+{
+ rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
+ rtx addr = gen_reg_rtx (Pmode);
+ rtx off = gen_reg_rtx (SImode);
+ rtx shifter = gen_reg_rtx (SImode);
+ rtx retword = gen_reg_rtx (SImode);
+ rtx mask = gen_reg_rtx (SImode);
+ rtx shifted_oldval = gen_reg_rtx (SImode);
+ rtx shifted_newval = gen_reg_rtx (SImode);
+ rtx shifted_mask = gen_reg_rtx (SImode);
+ rtx mask_const;
+ rtx memsi;
+ enum machine_mode mode = GET_MODE (mem);
+
+ oldval = gen_lowpart_common (SImode, oldval);
+ newval = gen_lowpart_common (SImode, newval);
+
+ mask_const = gen_rtx_CONST_INT (VOIDmode,
+ mode == QImode ? 0xff : 0xffff);
+ emit_insn (gen_rtx_SET (VOIDmode, mask, mask_const));
+
+ /* align address and retrieve the offset. */
+ emit_insn (gen_rtx_SET (VOIDmode, addr,
+ gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
+ emit_insn (gen_rtx_SET (VOIDmode, off,
+ gen_rtx_AND (SImode, addr1, GEN_INT (3))));
+ emit_insn (gen_rtx_SET (VOIDmode, off,
+ gen_rtx_XOR (SImode, off,
+ GEN_INT (GET_MODE (mem) == QImode
+ ? 3 : 2))));
+
+ memsi = gen_rtx_MEM (SImode, addr);
+
+ /* shift all arguments to be aligned to where the data we want
+ * to operate on is located. */
+ emit_insn (gen_rtx_SET (VOIDmode, shifter,
+ gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
+
+ emit_insn (gen_ashlsi3 (shifted_oldval, oldval, shifter));
+ emit_insn (gen_ashlsi3 (shifted_newval, newval, shifter));
+ emit_insn (gen_ashlsi3 (shifted_mask, mask, shifter));
+
+ emit_insn (gen_cmpxchg_mask (bval, retword, memsi, shifted_oldval,
+ shifted_newval, shifted_mask));
+
+ /* shift the data we care about to the lower end. */
+ emit_insn (gen_lshrsi3 (retword, retword, shifter));
+
+ emit_move_insn (retval, gen_lowpart (GET_MODE (retval), retword));
+}
+
+/* TODO(bluecmd): Write documentation for this function */
+void
+or1k_expand_fetch_op_qihi (rtx oldval, rtx mem, rtx operand, rtx newval,
+ rtx (*generator)(rtx, rtx, rtx, rtx, rtx))
+{
+ rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
+ rtx addr = gen_reg_rtx (Pmode);
+ rtx off = gen_reg_rtx (SImode);
+ rtx shifter = gen_reg_rtx (SImode);
+ rtx mask = gen_reg_rtx (SImode);
+ rtx shifted_oldval = gen_reg_rtx (SImode);
+ rtx shifted_newval = gen_reg_rtx (SImode);
+ rtx shifted_operand = gen_reg_rtx (SImode);
+ rtx shifted_mask = gen_reg_rtx (SImode);
+ rtx mask_const;
+ rtx memsi;
+ enum machine_mode mode = GET_MODE (mem);
+
+ /* TODO(bluecmd): A lot of code is shared between cmpxchg and this. We should
+ * move it to nice functions. */
+ operand = gen_lowpart_common (SImode, operand);
+
+ mask_const = gen_rtx_CONST_INT (VOIDmode,
+ mode == QImode ? 0xff : 0xffff);
+ emit_insn (gen_rtx_SET (VOIDmode, mask, mask_const));
+
+ /* align address and retrieve the offset. */
+ emit_insn (gen_rtx_SET (VOIDmode, addr,
+ gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
+ emit_insn (gen_rtx_SET (VOIDmode, off,
+ gen_rtx_AND (SImode, addr1, GEN_INT (3))));
+ emit_insn (gen_rtx_SET (VOIDmode, off,
+ gen_rtx_XOR (SImode, off,
+ GEN_INT (GET_MODE (mem) == QImode
+ ? 3 : 2))));
+
+ memsi = gen_rtx_MEM (SImode, addr);
+
+ /* shift all arguments to be aligned to where the data we want
+ * to operate on is located. */
+ emit_insn (gen_rtx_SET (VOIDmode, shifter,
+ gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
+
+ emit_insn (gen_ashlsi3 (shifted_operand, operand, shifter));
+ emit_insn (gen_ashlsi3 (shifted_mask, mask, shifter));
+
+ emit_insn (generator (shifted_oldval, memsi, shifted_operand,
+ shifted_newval, shifted_mask));
+
+ /* shift the data we care about to the lower end. */
+ emit_insn (gen_lshrsi3 (shifted_oldval, shifted_oldval, shifter));
+ emit_insn (gen_lshrsi3 (shifted_newval, shifted_newval, shifter));
+ emit_move_insn (oldval, gen_lowpart (GET_MODE (oldval), shifted_oldval));
+ emit_move_insn (newval, gen_lowpart (GET_MODE (newval), shifted_newval));
+}
+
+/* -------------------------------------------------------------------------- */
+/*!Emit insns to use the l.cmov instruction
+
+ Emit a compare and then cmov. Only works for integer first operand.
+
+ @param[in] dest RTX for the destination operand.
+ @param[in] op RTX for the comparison operation
+ @param[in] true_cond RTX to move to dest if condition is TRUE.
+ @param[in] false_cond RTX to move to dest if condition is FALSE.
+
+ @return Non-zero (TRUE) if insns were emitted, zero (FALSE) otherwise. */
+/* -------------------------------------------------------------------------- */
+static int
+or1k_emit_int_cmove (rtx dest,
+ rtx op,
+ rtx true_cond,
+ rtx false_cond)
+{
+ rtx condition_rtx, cr;
+ rtx op0 = XEXP (op, 0);
+ rtx op1 = XEXP (op, 1);
+
+ /* Only integer comparison operands are supported; refuse anything else
+ so the caller can fall back to a different expansion. */
+ if ((GET_MODE (op0) != SImode) &&
+ (GET_MODE (op0) != HImode) &&
+ (GET_MODE (op0) != QImode))
+ {
+ return 0;
+ }
+
+ /* We still have to do the compare, because cmov doesn't do a compare, it
+ just looks at the FLAG bit set by a previous compare instruction. */
+ condition_rtx = or1k_expand_compare (GET_CODE (op), op0, op1);
+
+ /* The flag register the comparison above set; passed to the cmov
+ pattern so its use of the flag is visible to the RTL passes. */
+ cr = XEXP (condition_rtx, 0);
+
+ emit_insn (gen_cmov (dest, condition_rtx, true_cond, false_cond, cr));
+
+ return 1;
+
+} /* or1k_emit_int_cmove () */
+
+
+/* Output the assembler syntax for the address expression ADDR to STREAM.
+   Handles a MEM wrapping a register, a bare register (printed with a zero
+   displacement), register-plus-offset sums, symbol references, and falls
+   back to output_addr_const for anything else.  */
+static void
+or1k_print_operand_address (FILE *stream, rtx addr)
+{
+  rtx offset;
+
+  switch (GET_CODE (addr))
+    {
+    case MEM:
+      /* Print the base register held INSIDE the MEM.  REGNO must be applied
+         to the inner REG (XEXP (addr, 0)), which is what was just tested --
+         applying it to the MEM rtx itself was a bug.  */
+      if (GET_CODE (XEXP (addr, 0)) == REG)
+        fprintf (stream, "%s", reg_names[REGNO (XEXP (addr, 0))]);
+      else
+        abort ();
+      break;
+
+    case REG:
+      /* A bare register is addressed with an explicit zero displacement.  */
+      fprintf (stream, "0(%s)", reg_names[REGNO (addr)]);
+      break;
+
+    case PLUS:
+      offset = 0;
+
+      if (GET_CODE (XEXP (addr, 0)) == REG)
+        {
+          offset = XEXP (addr, 1);
+          addr = XEXP (addr, 0);
+        }
+      else if (GET_CODE (XEXP (addr, 1)) == REG)
+        {
+          offset = XEXP (addr, 0);
+          addr = XEXP (addr, 1);
+        }
+      /* NOTE(review): if neither side of the PLUS is a REG, offset stays
+         null here and output_address receives a null rtx -- presumably the
+         legitimate-address checks prevent that; confirm against callers.  */
+      output_address (offset);
+      fprintf (stream, "(%s)", reg_names[REGNO (addr)]);
+      break;
+
+    case SYMBOL_REF:
+      /* Make sure externally visible symbols have been declared to the
+         assembler before they are referenced.  */
+      if (SYMBOL_REF_DECL (addr))
+        assemble_external (SYMBOL_REF_DECL (addr));
+
+      /* A leading '*' means "emit the name verbatim, no user prefix".  */
+      if (XSTR (addr, 0)[0] == '*')
+        fputs (&XSTR (addr, 0)[1], stream);
+      else
+        {
+          asm_fprintf (stream, "%U%s", XSTR (addr, 0));
+        }
+      break;
+
+    default:
+      output_addr_const (stream, addr);
+    }
+}
+
+/* -------------------------------------------------------------------------- */
+/*!Is this a value suitable for an OR1K address displacement?
+
+   The value must be a CONST_INT fitting in a signed 16-bit field.  When the
+   access is a double word (DFmode/DImode), the second word is read at
+   displacement + 4, so the upper bound is tightened accordingly.
+
+   @param[in] mode Mode of the result for which this displacement will be
+                   used.
+   @param[in] x    RTX for an expression.
+
+   @return Non-zero (TRUE) if this is a valid 16-bit offset, zero (FALSE)
+           otherwise. */
+/* -------------------------------------------------------------------------- */
+static int
+or1k_legitimate_displacement_p (enum machine_mode mode,
+                                rtx x)
+{
+  HOST_WIDE_INT disp;
+
+  if (GET_CODE (x) != CONST_INT)
+    return 0;
+
+  disp = INTVAL (x);
+
+  /* Double-word accesses also touch disp + 4; keep that in range too.  */
+  if ((DFmode == mode) || (DImode == mode))
+    return (-32768 < disp) && (disp <= 32763);
+
+  return (-32768 < disp) && (disp <= 32767);
+} /* or1k_legitimate_displacement_p () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Can this register be used as a base register?
+
+   In the strict case the register must either already be a hard register in
+   the integer set, or a pseudo that reload has renumbered to one.  In the
+   non-strict case any register other than the flag register is accepted.
+
+   @todo The code from the old port does not allow r0 as a base when strict,
+         and does when non-strict.  Surely it is always a valid register?
+
+   @param[in] num    The register to test
+   @param[in] strict Non-zero (TRUE) if this is a strict check, zero (FALSE)
+                     otherwise.
+
+   @return Non-zero (TRUE) if this register can be used as a base register,
+           zero (FALSE) otherwise. */
+/* -------------------------------------------------------------------------- */
+static bool
+or1k_regnum_ok_for_base_p (HOST_WIDE_INT num,
+                           bool strict)
+{
+  if (!strict)
+    return (num <= OR1K_LAST_INT_REG) || (num >= FIRST_PSEUDO_REGISTER);
+
+  /* Strict: a hard register must be r1..r31; a pseudo must have been
+     renumbered to one of those.  */
+  if (num < FIRST_PSEUDO_REGISTER)
+    return (num > 0) && (num <= OR1K_LAST_INT_REG);
+
+  return (reg_renumber[num] > 0) && (reg_renumber[num] <= OR1K_LAST_INT_REG);
+} /* or1k_regnum_ok_for_base_p () */
+
+/* Return nonzero if X is a legitimate operand under PIC.  The only case
+   needing a real check is (const (plus sym/label const_int)) where the
+   symbol is non-local or weak: there the addend must fit a 16-bit
+   displacement.  Everything else is accepted.  */
+int
+or1k_legitimate_pic_operand_p (rtx x)
+{
+  rtx sum, term, addend;
+
+  if (GET_CODE (x) != CONST)
+    return 1;
+
+  sum = XEXP (x, 0);
+  if (GET_CODE (sum) != PLUS)
+    return 1;
+
+  term = XEXP (sum, 0);
+  addend = XEXP (sum, 1);
+  if (!CONST_INT_P (addend))
+    return 1;
+
+  if (GET_CODE (term) == LABEL_REF
+      || (GET_CODE (term) == SYMBOL_REF
+          && (!SYMBOL_REF_LOCAL_P (term) || SYMBOL_REF_WEAK (term))))
+    return or1k_legitimate_displacement_p (SImode, addend);
+
+  return 1;
+}
+
+/* Expand a PIC reference to the symbolic OPERANDS[1], leaving the result in
+   OPERANDS[0].  MODE is used to validate (and if necessary materialize) a
+   constant addend -- note the parameter IS used, so it must not be marked
+   ATTRIBUTE_UNUSED as it previously was.  Local, non-weak symbols and
+   labels go through a GOT-offset (gotoffhi/gotofflo) sequence; other
+   symbols are loaded through the GOT.  Returns true when a PIC sequence
+   was emitted, false if the operand needs no PIC handling.  */
+static bool
+or1k_expand_pic_symbol_ref (enum machine_mode mode,
+                            rtx operands[])
+{
+  if (GET_CODE (operands[1]) == LABEL_REF
+      || (GET_CODE (operands[1]) == SYMBOL_REF
+          && SYMBOL_REF_LOCAL_P (operands[1])
+          && !SYMBOL_REF_WEAK (operands[1])))
+    {
+      /* Local symbol/label: GOT-pointer-relative hi/lo pair plus the GOT
+         base register.  */
+      crtl->uses_pic_offset_table = 1;
+      emit_insn (gen_movsi_gotoffhi (operands[0], operands[1]));
+      emit_insn (gen_movsi_gotofflo (operands[0], operands[0],
+                                     operands[1]));
+      emit_insn (gen_add3_insn(operands[0], operands[0],
+                               pic_offset_table_rtx));
+      return true;
+    }
+  else if (GET_CODE (operands[1]) == SYMBOL_REF)
+    {
+      /* Global or weak symbol: load its address from the GOT.  */
+      crtl->uses_pic_offset_table = 1;
+      emit_insn (gen_movsi_got (operands[0], operands[1]));
+      return true;
+    }
+  else if (GET_CODE (operands[1]) == CONST
+           && GET_CODE (XEXP (operands[1], 0)) == PLUS
+           && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF
+           && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT)
+    {
+      rtx symbolref = XEXP (XEXP (operands[1], 0), 0);
+      crtl->uses_pic_offset_table = 1;
+
+      if (SYMBOL_REF_LOCAL_P (symbolref)
+          && !SYMBOL_REF_WEAK (symbolref))
+        {
+          /* Local symbol + addend: the whole CONST can be resolved by the
+             gotoff relocations directly.  */
+          emit_insn (gen_movsi_gotoffhi (operands[0], operands[1]));
+          emit_insn (gen_movsi_gotofflo (operands[0], operands[0],
+                                         operands[1]));
+          emit_insn (gen_add3_insn(operands[0], operands[0],
+                                   pic_offset_table_rtx));
+        }
+      else
+        {
+          rtx const_int = XEXP (XEXP (operands[1], 0), 1);
+
+          /* Expand the constant into a register if it doesn't
+             fit directly as an 16-bit immediate in the add below.
+             Note that the reg allocation is allowed here since
+             we are guarded by LEGITIMATE_PIC_OPERAND_P. */
+          if (!or1k_legitimate_displacement_p (mode, const_int))
+            {
+              rtx scratch = gen_reg_rtx (mode);
+
+              or1k_emit_set_const32 (scratch, const_int);
+              const_int = scratch;
+            }
+
+          emit_insn (gen_movsi_got (operands[0], symbolref));
+          emit_insn (gen_add3_insn(operands[0], operands[0], const_int));
+        }
+      return true;
+    }
+  return false;
+}
+
+/* Return the TLS model for a symbolic operand OP (optionally wrapped in a
+   CONST with an addend), or TLS_MODEL_NONE (0) for anything else.  */
+enum tls_model
+or1k_tls_symbolic_operand (rtx op)
+{
+  rtx sym = op;
+
+  /* Peel off a constant addend so the bare symbol can be inspected.  */
+  if (GET_CODE (op) == CONST)
+    {
+      rtx addend;
+      split_const (op, &sym, &addend);
+    }
+
+  if (GET_CODE (sym) == SYMBOL_REF)
+    return SYMBOL_REF_TLS_MODEL (sym);
+
+  return TLS_MODEL_NONE;
+}
+
+/* Cached rtx for the '__tls_get_addr' libfunc; marked GTY so the garbage
+   collector keeps it alive between passes.  */
+static GTY(()) rtx gen_tls_tga;
+
+/* Get reference to the '__tls_get_addr' symbol */
+static rtx
+gen_tls_get_addr (void)
+{
+  /* Create the libfunc symbol lazily, on first use.  */
+  if (!gen_tls_tga)
+    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
+  return gen_tls_tga;
+}
+
+/* Emit call to '__tls_get_addr' */
+static void
+or1k_tls_call (rtx dest, rtx arg)
+{
+  /* One Pmode argument (ARG), result in DEST.  LCT_CONST: the libcall's
+     result depends only on its argument and it has no side effects.  */
+  emit_library_call_value (gen_tls_get_addr(), dest,
+                           LCT_CONST, Pmode, 1, arg, Pmode);
+}
+
+/* Legitimize the TLS address X into DEST, emitting the access sequence
+   appropriate to the symbol's TLS model (global/local dynamic via
+   __tls_get_addr, initial exec via a GOT tp-offset load, or local exec via
+   a direct tp-offset).  Any constant addend folded into X is added back at
+   the end.  Returns DEST.  */
+static rtx
+or1k_legitimize_tls_address (rtx dest, rtx x)
+{
+  rtx sym;
+  rtx tp = gen_rtx_REG(Pmode, THREAD_PTR_REGNUM);
+  rtx addend = NULL_RTX;
+  rtx result = dest;
+
+  enum tls_model tls_kind = or1k_tls_symbolic_operand (x);
+
+  /* Build a fresh SYMBOL_REF from the name so it carries no TLS marking
+     (see the recursion note in the GD case below).  */
+  if (GET_CODE (x) == SYMBOL_REF)
+    sym = gen_rtx_SYMBOL_REF(Pmode, XSTR(x, 0));
+  else if (GET_CODE (x) == CONST)
+    {
+      /* Symbol plus addend: resolve the bare symbol into a scratch
+         register; the addend is applied after the switch.  */
+      result = gen_reg_rtx (Pmode);
+      split_const (x, &sym, &addend);
+      sym = gen_rtx_SYMBOL_REF(Pmode, XSTR(sym, 0));
+    }
+  else
+    gcc_unreachable ();
+
+  switch (tls_kind) {
+  case TLS_MODEL_GLOBAL_DYNAMIC:
+  case TLS_MODEL_LOCAL_DYNAMIC:
+    {
+      /* TODO: For now, treat LD as GD */
+      rtx hi = gen_reg_rtx (Pmode);
+      rtx offset = gen_reg_rtx (Pmode);
+      rtx addr = gen_reg_rtx (Pmode);
+      crtl->uses_pic_offset_table = 1;
+      /* Generate a new symbol ref that is not marked as TLS or we will recurse
+       * in or1k_legitimate_constant_p. */
+      emit_insn (gen_movsi_tlsgdhi (hi, sym));
+      emit_insn (gen_movsi_tlsgdlo (offset, hi, sym));
+      emit_insn (gen_add3_insn (addr, offset, pic_offset_table_rtx));
+      or1k_tls_call (result, addr);
+      break;
+    }
+  case TLS_MODEL_INITIAL_EXEC:
+    {
+      /* Load the tp-relative offset from the GOT, then add the thread
+         pointer.  */
+      rtx hi = gen_reg_rtx (Pmode);
+      rtx offset = gen_reg_rtx (Pmode);
+      rtx addr = gen_reg_rtx (Pmode);
+      rtx tpoffset = gen_reg_rtx (Pmode);
+      crtl->uses_pic_offset_table = 1;
+      emit_insn (gen_movsi_gottpoffhi (hi, sym));
+      emit_insn (gen_movsi_gottpofflo (offset, hi, sym));
+      emit_insn (gen_add3_insn (addr, offset, pic_offset_table_rtx));
+      emit_insn (gen_load_gottpoff (tpoffset, addr));
+      emit_insn (gen_add3_insn (result, tpoffset, tp));
+      break;
+    }
+  case TLS_MODEL_LOCAL_EXEC:
+    {
+      /* The tp-relative offset is known at link time: hi/lo immediate
+         pair plus the thread pointer, no GOT involved.  */
+      rtx hi = gen_reg_rtx (Pmode);
+      rtx addr = gen_reg_rtx (Pmode);
+      emit_insn (gen_movsi_tpoffhi (hi, sym));
+      emit_insn (gen_movsi_tpofflo (addr, hi, sym));
+      emit_insn (gen_add3_insn (result, addr, tp));
+      break;
+    }
+  default:
+    gcc_unreachable ();
+  }
+
+  /* Re-apply any addend that was split off the original CONST.  */
+  if (addend != NULL_RTX)
+    emit_insn (gen_add3_insn (dest, result, addend));
+
+  return dest;
+}
+
+/* TARGET_LEGITIMIZE_ADDRESS hook.  Only TLS references need rewriting;
+   every other address is handed back unchanged for generic handling.  */
+static rtx
+or1k_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+                         enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+  if (or1k_tls_symbolic_operand (x) == TLS_MODEL_NONE)
+    return x;
+
+  return or1k_legitimize_tls_address (gen_reg_rtx (Pmode), x);
+}
+
+/* TARGET_CANNOT_FORCE_CONST_MEM hook.  TLS symbols must be legitimized
+   dynamically, so they may never be spilled to the constant pool.  */
+static bool
+or1k_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+  if (or1k_tls_symbolic_operand (x) == TLS_MODEL_NONE)
+    return false;
+
+  return true;
+}
+
+/* Expand a symbolic OPERANDS[1] into OPERANDS[0] if special (PIC) handling
+   is required.  Returns true when a sequence was emitted, false when the
+   generic move machinery should proceed.  */
+bool
+or1k_expand_symbol_ref (enum machine_mode mode, rtx operands[])
+{
+  /* Only PIC compilation needs symbol references rewritten here.  */
+  return flag_pic && or1k_expand_pic_symbol_ref (mode, operands);
+}
+
+/* Expand a move of OPERANDS[1] into OPERANDS[0].  Returns true when the
+   expander emitted the whole sequence itself, false when the caller's
+   pattern should carry on with the (possibly adjusted) operands.
+   NOTE(review): SImode is hard-coded in the force_reg calls below --
+   presumably this expander is only used for word moves; confirm against
+   the movsi pattern.  */
+bool
+or1k_expand_move (enum machine_mode mode, rtx operands[])
+{
+  if (can_create_pseudo_p ())
+    {
+      if (GET_CODE (operands[0]) == MEM
+          || (GET_CODE (operands[0]) == SUBREG
+              && GET_CODE (SUBREG_REG (operands[0])) == MEM))
+        {
+          /* Source operand for store must be in a register. */
+          operands[1] = force_reg (SImode, operands[1]);
+        }
+    }
+
+  /* TLS references need a full legitimizing sequence of their own.  */
+  if (or1k_tls_symbolic_operand (operands[1]) != TLS_MODEL_NONE)
+    {
+      or1k_legitimize_tls_address (force_reg (Pmode, operands[0]),
+                                   operands[1]);
+      return true;
+    }
+
+  if (or1k_expand_symbol_ref (mode, operands))
+    return true;
+
+  /* Working with CONST_INTs is easier, so convert
+     a double if needed. */
+
+  if (GET_CODE (operands[1]) == CONST_DOUBLE) {
+    operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+  }
+
+  /* Handle sets of MEM first. */
+  if (GET_CODE (operands[0]) == MEM)
+    {
+      /* A register source (or the constant zero, stored as r0) needs no
+         further work.  */
+      if (register_operand(operands[1], SImode)
+          || (operands[1] == const0_rtx))
+        goto movsi_is_ok;
+
+      if (! reload_in_progress)
+        {
+          operands[0] = validize_mem (operands[0]);
+          operands[1] = force_reg (SImode, operands[1]);
+        }
+    }
+
+  /* This makes sure we will not get rematched due to splittage. */
+  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], SImode))
+    ;
+  else if (CONSTANT_P (operands[1])
+           && GET_CODE (operands[1]) != HIGH
+           && GET_CODE (operands[1]) != LO_SUM)
+    {
+      /* Wide constant: decompose it into a two-insn sequence ourselves.  */
+      or1k_emit_set_const32 (operands[0], operands[1]);
+      return true;
+    }
+ movsi_is_ok:
+  ;
+
+  return false;
+}
+
+/* -------------------------------------------------------------------------- */
+/*!Emit a move from SRC to DEST.
+
+   While pseudos may still be created, defer to emit_move_insn.  During and
+   after reload use emit_move_insn_1 instead: unlike emit_move_insn, the move
+   expanders know how to force Pmode objects into the constant pool even when
+   the constant pool address is not itself legitimate.
+
+   @param[in] dest Destination of the move.
+   @param[in] src  Source for the move.
+
+   @return  RTX for the move. */
+/* -------------------------------------------------------------------------- */
+static rtx
+or1k_emit_move (rtx dest, rtx src)
+{
+  if (can_create_pseudo_p ())
+    return emit_move_insn (dest, src);
+
+  return emit_move_insn_1 (dest, src);
+} /* or1k_emit_move () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Emit an instruction of the form (set TARGET (CODE OP0 OP1)).
+
+   @param[in] code   The code for the operation.
+   @param[in] target Destination for the set operation.
+   @param[in] op0    First operand.
+   @param[in] op1    Second operand. */
+/* -------------------------------------------------------------------------- */
+static void
+or1k_emit_binary (enum rtx_code code,
+                  rtx target,
+                  rtx op0,
+                  rtx op1)
+{
+  /* Build the binary expression in the target's mode, then set it.  */
+  rtx rhs = gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1);
+
+  emit_insn (gen_rtx_SET (VOIDmode, target, rhs));
+} /* or1k_emit_binary () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Compute the result of an operation into a new register.
+
+   Compute ("code" "op0" "op1") and store the result in a freshly allocated
+   pseudo of mode "mode".
+
+   @param[in] mode Mode of the result
+   @param[in] code RTX code for the operation to perform
+   @param[in] op0  RTX for the first operand
+   @param[in] op1  RTX for the second operand
+
+   @return  The RTX for the new register. */
+/* -------------------------------------------------------------------------- */
+static rtx
+or1k_force_binary (enum machine_mode mode,
+                   enum rtx_code code,
+                   rtx op0,
+                   rtx op1)
+{
+  rtx result = gen_reg_rtx (mode);
+
+  or1k_emit_binary (code, result, op0, op1);
+
+  return result;
+} /* or1k_force_binary () */
+
+
+/* ========================================================================== */
+/* Global support functions */
+
+/* Number of instruction words in the trampoline code sequence: five, plus
+   one more in TARGET_DELAY_COMPAT mode to hold the l.nop filling the delay
+   slot.  */
+static int
+or1k_trampoline_code_words (void)
+{
+  return TARGET_DELAY_COMPAT ? 6 : 5;
+}
+
+/* -------------------------------------------------------------------------- */
+/* Return the size in bytes of the trampoline code.
+
+   Padded to TRAMPOLINE_ALIGNMENT bits.  The code sequence is documented in
+   or1k_trampoline_init ().
+
+   This is just the code size.  The static chain pointer and target function
+   address immediately follow.
+
+   @return  The size of the trampoline code in bytes. */
+/* -------------------------------------------------------------------------- */
+int
+or1k_trampoline_code_size (void)
+{
+  const int align_bytes = TRAMPOLINE_ALIGNMENT / 8;
+  const int code_bytes = or1k_trampoline_code_words () * 4;
+
+  /* Round the raw code size up to the next alignment boundary.  */
+  return (code_bytes + align_bytes - 1) / align_bytes * align_bytes;
+} /* or1k_trampoline_code_size () */
+
+
+/* ========================================================================== */
+/* Functions to support the Machine Description */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Expand a prologue pattern.
+
+   Called after register allocation to add any instructions needed for the
+   prologue. Using a prologue insn is favored compared to putting all of the
+   instructions in output_function_prologue(), since it allows the scheduler
+   to intermix instructions with the saves of the caller saved registers. In
+   some cases, it might be necessary to emit a barrier instruction as the last
+   insn to prevent such scheduling. */
+/* -------------------------------------------------------------------------- */
+void
+or1k_expand_prologue (void)
+{
+  int total_size = or1k_compute_frame_size (get_frame_size ());
+  rtx insn;
+
+  if (!total_size)
+    /* No frame needed. */
+    return;
+
+  /* The frame layout must give lr and fp distinct save slots.  */
+  gcc_assert (!frame_info.save_lr_p || !frame_info.save_fp_p
+              || frame_info.lr_save_offset != frame_info.fp_save_offset);
+
+  /* First allocate the part of the frame holding saved GPRs.  */
+  if (frame_info.gpr_frame)
+    emit_frame_insn (gen_add2_insn (stack_pointer_rtx,
+                                    GEN_INT (-frame_info.gpr_frame)));
+  if (frame_info.save_fp_p)
+    {
+      /* Save the old frame pointer, then point it at the current stack.  */
+      emit_frame_insn (gen_rtx_SET (Pmode,
+                                    stack_disp_mem (frame_info.fp_save_offset),
+                                    hard_frame_pointer_rtx));
+
+      emit_frame_insn
+        (gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx, const0_rtx));
+    }
+  if (frame_info.save_lr_p)
+    {
+      emit_frame_insn
+        (gen_rtx_SET (Pmode, stack_disp_mem (frame_info.lr_save_offset),
+                      gen_rtx_REG (Pmode, LINK_REGNUM)));
+    }
+  if (frame_info.gpr_size)
+    {
+      int offset = 0;
+      int regno;
+
+      /* Save each call-saved GPR selected in frame_info.mask.  */
+      for (regno = 0; regno <= OR1K_LAST_ACTUAL_REG; regno++)
+        {
+          if (!(frame_info.mask & ((HOST_WIDE_INT) 1 << regno)))
+            continue;
+
+          /* Check that the offsets aren't stepping on lr/fp slots */
+          gcc_assert (!frame_info.save_lr_p
+                      || ((frame_info.gpr_offset + offset)
+                          != frame_info.lr_save_offset));
+          gcc_assert (!frame_info.save_fp_p
+                      || ((frame_info.gpr_offset + offset)
+                          != frame_info.fp_save_offset));
+
+          emit_frame_insn
+            (gen_rtx_SET (Pmode,
+                          stack_disp_mem (frame_info.gpr_offset + offset),
+                          gen_rtx_REG (Pmode, regno)));
+          offset = offset + UNITS_PER_WORD;
+        }
+    }
+
+  /* Update the stack pointer to reflect frame size. */
+  total_size = frame_info.late_frame;
+  insn = gen_add2_insn (stack_pointer_rtx, GEN_INT (-total_size));
+  if (total_size > 32768)
+    {
+      /* Too big for an immediate: build the constant in a temporary and
+         attach the simple form as a REG_FRAME_RELATED_EXPR note so the
+         unwinder still sees a plain stack adjustment.  */
+      rtx note = insn;
+      rtx value_rtx = gen_rtx_REG (Pmode, PROLOGUE_TMP);
+
+      or1k_emit_set_const32 (value_rtx, GEN_INT (-total_size));
+      if (frame_info.save_fp_p)
+        insn = gen_frame_alloc_fp (value_rtx);
+      else
+        insn = gen_add2_insn (stack_pointer_rtx, value_rtx);
+      insn = emit_frame_insn (insn);
+      add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
+    }
+  else if (total_size)
+    {
+      if (frame_info.save_fp_p)
+        emit_frame_insn (gen_frame_alloc_fp (GEN_INT (-total_size)));
+      else
+        emit_frame_insn (insn);
+    }
+  /* Emit got pointer acquiring if there are any got references or
+     this function has calls */
+  if (crtl->uses_pic_offset_table || (flag_pic && frame_info.save_lr_p))
+    {
+      SET_REGNO (pic_offset_table_rtx, PIC_OFFSET_TABLE_REGNUM);
+      emit_insn (gen_set_got (pic_offset_table_rtx));
+    }
+
+} /* or1k_expand_prologue () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Expand an epilogue pattern.
+
+   Called after register allocation to add any instructions needed for the
+   epilogue. Using an epilogue insn is favored compared to putting all of the
+   instructions in output_function_epilogue(), since it allows the scheduler
+   to intermix instructions with the restores of the caller saved registers.
+   In some cases, it might be necessary to emit a barrier instruction as the
+   first insn to prevent such scheduling. */
+/* -------------------------------------------------------------------------- */
+void
+or1k_expand_epilogue (void)
+{
+  int total_size = or1k_compute_frame_size (get_frame_size ());
+
+  if (frame_info.save_fp_p)
+    {
+      /* Deallocate through the frame pointer, then restore the old one.  */
+      emit_insn (gen_frame_dealloc_fp ());
+      emit_insn
+        (gen_rtx_SET (Pmode, hard_frame_pointer_rtx,
+                      stack_disp_mem (frame_info.fp_save_offset)));
+    }
+  else
+    {
+      /* Initialize so value_rtx can never be read uninitialized: it is
+         only assigned when there is a late frame, which is exactly when
+         it is consumed below.  */
+      rtx value_rtx = NULL_RTX;
+
+      total_size = frame_info.late_frame;
+      if (total_size > 32767)
+        {
+          /* Too big for a 16-bit immediate: build it in a register.  */
+          value_rtx = gen_rtx_REG (Pmode, EPILOGUE_TMP);
+          or1k_emit_set_const32 (value_rtx, GEN_INT (total_size));
+        }
+      else if (frame_info.late_frame)
+        value_rtx = GEN_INT (total_size);
+      if (total_size)
+        emit_insn (gen_frame_dealloc_sp (value_rtx));
+    }
+
+  /* eh_return sets the LR, do not overwrite it */
+  if (frame_info.save_lr_p && !crtl->calls_eh_return)
+    {
+      emit_insn
+        (gen_rtx_SET (Pmode, gen_rtx_REG (Pmode, LINK_REGNUM),
+                      stack_disp_mem (frame_info.lr_save_offset)));
+    }
+
+  if (frame_info.gpr_size)
+    {
+      int offset = 0;
+      int regno;
+
+      /* Restore each call-saved GPR recorded in frame_info.mask.  */
+      for (regno = 2; regno <= OR1K_LAST_ACTUAL_REG; regno++)
+        {
+          if (!(frame_info.mask & ((HOST_WIDE_INT) 1 << regno)))
+            continue;
+
+          if (regno != FIRST_PSEUDO_REGISTER)
+            emit_insn
+              (gen_rtx_SET (Pmode, gen_rtx_REG (Pmode, regno),
+                            stack_disp_mem (frame_info.gpr_offset + offset)));
+          offset = offset + UNITS_PER_WORD;
+        }
+    }
+
+  /* Apply the eh_return stack adjustment after the restores.  */
+  if (crtl->calls_eh_return)
+    emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX));
+
+  if (frame_info.gpr_frame)
+    emit_insn (gen_add2_insn (stack_pointer_rtx,
+                              GEN_INT (frame_info.gpr_frame)));
+  emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, 9)));
+
+} /* or1k_expand_epilogue () */
+
+
+
+/* -------------------------------------------------------------------------- */
+/*!Generate assembler code for a movdi/movdf pattern
+
+   @param[in] operands  Operands to the movdx pattern.
+
+   @return  The assembler string to output (always "", since we've done the
+            output here). */
+/* -------------------------------------------------------------------------- */
+const char *
+or1k_output_move_double (rtx *operands)
+{
+  rtx xoperands[3];
+
+  switch (GET_CODE (operands[0]))
+    {
+    case REG:
+      if (GET_CODE (operands[1]) == REG)
+        {
+          /* Register-to-register: order the two word moves so the second
+             source word is not clobbered before it is copied.  */
+          if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+            {
+              output_asm_insn ("\tl.or \t%H0, %H1, r0", operands);
+              output_asm_insn ("\tl.or \t%0, %1, r0", operands);
+              return "";
+            }
+          else
+            {
+              output_asm_insn ("\tl.or \t%0, %1, r0", operands);
+              output_asm_insn ("\tl.or \t%H0, %H1, r0", operands);
+              return "";
+            }
+        }
+      else if (GET_CODE (operands[1]) == MEM)
+        {
+          xoperands[1] = XEXP (operands[1], 0);
+          if (GET_CODE (xoperands[1]) == REG)
+            {
+              xoperands[0] = operands[0];
+              /* If the destination's low word is the base register, load
+                 the high word first so the base survives for both loads.  */
+              if (REGNO (xoperands[0]) == REGNO (xoperands[1]))
+                {
+                  output_asm_insn ("\tl.lwz \t%H0, 4(%1)", xoperands);
+                  output_asm_insn ("\tl.lwz \t%0, 0(%1)", xoperands);
+                  return "";
+                }
+              else
+                {
+                  output_asm_insn ("\tl.lwz \t%0, 0(%1)", xoperands);
+                  output_asm_insn ("\tl.lwz \t%H0, 4(%1)", xoperands);
+                  return "";
+                }
+            }
+          else if (GET_CODE (xoperands[1]) == PLUS)
+            {
+              /* Base + offset address: find which side is the register and
+                 apply the same overlap-aware ordering as above.  */
+              if (GET_CODE (xoperands[2] = XEXP (xoperands[1], 1)) == REG)
+                {
+                  xoperands[0] = operands[0];
+                  xoperands[1] = XEXP (xoperands[1], 0);
+                  if (REGNO (xoperands[0]) == REGNO (xoperands[2]))
+                    {
+                      output_asm_insn ("\tl.lwz \t%H0, %1+4(%2)",
+                                       xoperands);
+                      output_asm_insn ("\tl.lwz \t%0, %1(%2)", xoperands);
+                      return "";
+                    }
+                  else
+                    {
+                      output_asm_insn ("\tl.lwz \t%0, %1(%2)", xoperands);
+                      output_asm_insn ("\tl.lwz \t%H0, %1+4(%2)",
+                                       xoperands);
+                      return "";
+                    }
+                }
+              else if (GET_CODE (xoperands[2] = XEXP (xoperands[1], 0)) ==
+                       REG)
+                {
+                  xoperands[0] = operands[0];
+                  xoperands[1] = XEXP (xoperands[1], 1);
+                  if (REGNO (xoperands[0]) == REGNO (xoperands[2]))
+                    {
+                      output_asm_insn ("\tl.lwz \t%H0, %1+4(%2)",
+                                       xoperands);
+                      output_asm_insn ("\tl.lwz \t%0, %1(%2)", xoperands);
+                      return "";
+                    }
+                  else
+                    {
+                      output_asm_insn ("\tl.lwz \t%0, %1(%2)", xoperands);
+                      output_asm_insn ("\tl.lwz \t%H0, %1+4(%2)",
+                                       xoperands);
+                      return "";
+                    }
+                }
+              else
+                abort ();
+            }
+          else
+            abort ();
+        }
+      else
+        abort ();
+    case MEM:
+      /* Store: the source register pair cannot be clobbered, so the two
+         word stores may always go in ascending address order.  */
+      xoperands[0] = XEXP (operands[0], 0);
+      if (GET_CODE (xoperands[0]) == REG)
+        {
+          xoperands[1] = operands[1];
+          output_asm_insn ("\tl.sw \t0(%0), %1", xoperands);
+          output_asm_insn ("\tl.sw \t4(%0), %H1", xoperands);
+          return "";
+        }
+      else if (GET_CODE (xoperands[0]) == PLUS)
+        {
+          if (GET_CODE (xoperands[1] = XEXP (xoperands[0], 1)) == REG)
+            {
+              xoperands[0] = XEXP (xoperands[0], 0);
+              xoperands[2] = operands[1];
+              output_asm_insn ("\tl.sw \t%0(%1), %2", xoperands);
+              output_asm_insn ("\tl.sw \t%0+4(%1), %H2", xoperands);
+              return "";
+            }
+          else if (GET_CODE (xoperands[1] = XEXP (xoperands[0], 0)) == REG)
+            {
+              xoperands[0] = XEXP (xoperands[0], 1);
+              xoperands[2] = operands[1];
+              output_asm_insn ("\tl.sw \t%0(%1), %2", xoperands);
+              output_asm_insn ("\tl.sw \t%0+4(%1), %H2", xoperands);
+              return "";
+            }
+          else
+            abort ();
+        }
+      else
+        {
+          /* NOTE(review): this diagnostic path returns "" instead of
+             aborting -- presumably a debugging leftover; confirm whether
+             it should abort like the other invalid-address cases.  */
+          fprintf (stderr, " O/p error %s\n",
+                   GET_RTX_NAME (GET_CODE (xoperands[0])));
+          return "";
+          /* abort (); */
+        }
+    default:
+      abort ();
+    }
+} /* or1k_output_move_double () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Expand a conditional branch
+
+   Emit a compare to set the flag, then a conditional jump on it.  The
+   SImode and SFmode expansions were previously duplicated verbatim; they
+   are now merged into a single shared case.
+
+   @param[in] operands  Operands to the branch.
+   @param[in] mode      Mode of the comparison. */
+/* -------------------------------------------------------------------------- */
+void
+or1k_expand_conditional_branch (rtx *operands,
+                                enum machine_mode mode)
+{
+  rtx tmp;
+  enum rtx_code test_code = GET_CODE(operands[0]);
+
+  switch (mode)
+    {
+    case SImode:
+    case SFmode:
+      /* Integer and single-float branches expand identically: set the
+         flag via the comparison, then branch on it.  */
+      tmp = or1k_expand_compare (test_code, operands[1], operands[2]);
+      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode,
+                                  tmp,
+                                  gen_rtx_LABEL_REF (VOIDmode, operands[3]),
+                                  pc_rtx);
+      emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
+      return;
+
+    default:
+      abort ();
+    }
+
+} /* or1k_expand_conditional_branch () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Emit a conditional move
+
+   Move "true_cond" to "dest" if "op" of the operands of the last comparison
+   is nonzero/true, "false_cond" if it is zero/false.
+
+   @param[in] dest       RTX for the destination operand.
+   @param[in] op         RTX for the comparison operation
+   @param[in] true_cond  RTX to move to dest if condition is TRUE.
+   @param[in] false_cond RTX to move to dest if condition is FALSE.
+
+   @return  Non-zero (TRUE) if the hardware supports such an operation, zero
+            (FALSE) otherwise. */
+/* -------------------------------------------------------------------------- */
+int
+or1k_emit_cmove (rtx dest,
+                 rtx op,
+                 rtx true_cond,
+                 rtx false_cond)
+{
+  enum machine_mode result_mode = GET_MODE (dest);
+
+  /* Both arms must already be in the destination mode.  */
+  if (GET_MODE (true_cond) != result_mode
+      || GET_MODE (false_cond) != result_mode)
+    return 0;
+
+  /* First, work out if the hardware can do this at all */
+  return or1k_emit_int_cmove (dest, op, true_cond, false_cond);
+
+} /* or1k_emit_cmove () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Output the assembler for a branch on flag instruction.
+
+   @param[in] operands  Operands to the branch.
+
+   @return  The assembler string to use. */
+/* -------------------------------------------------------------------------- */
+const char *
+or1k_output_bf (rtx * operands)
+{
+  enum rtx_code code = GET_CODE (operands[1]);
+  enum machine_mode mode_calc = or1k_select_cc_mode (code);
+  enum machine_mode mode_got = GET_MODE (operands[2]);
+
+  /* When the flag mode in the insn disagrees with the one the comparison
+     code would select, the sense of the flag is inverted, so branch on
+     "flag clear" instead of "flag set".  */
+  return (mode_calc != mode_got) ? "l.bnf\t%l0%(" : "l.bf\t%l0%(";
+} /* or1k_output_bf () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Output the assembler for a conditional move instruction.
+
+   @param[in] operands  Operands to the conditional move.
+
+   @return  The assembler string to use. */
+/* -------------------------------------------------------------------------- */
+const char *
+or1k_output_cmov (rtx * operands)
+{
+  enum rtx_code code = GET_CODE (operands[1]);
+  enum machine_mode mode_calc = or1k_select_cc_mode (code);
+  enum machine_mode mode_got = GET_MODE (operands[4]);
+
+  /* A mode mismatch means the flag sense is inverted, so swap the two
+     source operands of the cmov.  */
+  return (mode_calc != mode_got)
+    ? "l.cmov\t%0,%3,%2"   /* reversed */
+    : "l.cmov\t%0,%2,%3";
+
+} /* or1k_output_cmov () */
+
+/* -------------------------------------------------------------------------- */
+/*!Load a 32-bit constant.
+
+   We know it can't be done in one insn when we get here, the movsi expander
+   guarantees this.
+
+   @param[in] op0  RTX for the destination.
+   @param[in] op1  RTX for the (constant) source. */
+/* -------------------------------------------------------------------------- */
+void
+or1k_emit_set_const32 (rtx op0,
+                       rtx op1)
+{
+  enum machine_mode mode = GET_MODE (op0);
+  rtx temp;
+
+  /* Sanity check that we really can't do it in one instruction. I.e that we
+     don't have a 16-bit constant. */
+  if (GET_CODE (op1) == CONST_INT)
+    {
+      HOST_WIDE_INT val = INTVAL (op1) & GET_MODE_MASK (mode);
+
+      if ((-32768 <= val) && (val <= 32767))
+        {
+          abort ();
+        }
+    }
+
+  /* Full 2-insn decomposition is needed.  During reload no new pseudos may
+     be created, so build the value directly in the destination.  */
+  if (reload_in_progress || reload_completed)
+    temp = op0;
+  else
+    temp = gen_reg_rtx (mode);
+
+  if (GET_CODE (op1) == CONST_INT)
+    {
+      /* Emit them as real moves instead of a HIGH/LO_SUM,
+         this way CSE can see everything and reuse intermediate
+         values if it wants. */
+      emit_insn (gen_rtx_SET (VOIDmode, temp,
+                              GEN_INT (INTVAL (op1)
+                                       & ~(HOST_WIDE_INT) 0xffff)));
+
+      emit_insn (gen_rtx_SET (VOIDmode,
+                              op0,
+                              gen_rtx_IOR (mode, temp,
+                                           GEN_INT (INTVAL (op1) & 0xffff))));
+    }
+  else
+    {
+      /* since or1k bfd can not deal with relocs that are not of type
+         OR1K_CONSTH_RELOC + OR1K_CONST_RELOC (ie move high must be
+         followed by exactly one lo_sum)
+       */
+      emit_insn (gen_movsi_insn_big (op0, op1));
+    }
+} /* or1k_emit_set_const32 () */
+
+
+/* ========================================================================== */
+/* Target hook functions.
+
+ These are initialized at the end of this file, to avoid having to
+ predeclare all the functions. They are only needed here, so are static. */
+
+
+
+
+/* -------------------------------------------------------------------------- */
+/*!Define where a function returns values.
+
+   Define this to return an RTX representing the place where a function
+   returns or receives a value of data type ret type, a tree node representing
+   a data type. "func" is a tree node representing FUNCTION_DECL or
+   FUNCTION_TYPE of a function being called. If "outgoing" is false, the hook
+   should compute the register in which the caller will see the return
+   value. Otherwise, the hook should return an RTX representing the place
+   where a function returns a value.
+
+   On many machines, only TYPE_MODE ("ret_type") is relevant. (Actually, on
+   most machines, scalar values are returned in the same place regardless of
+   mode.) The value of the expression is usually a reg RTX for the hard
+   register where the return value is stored. The value can also be a parallel
+   RTX, if the return value is in multiple places. See FUNCTION_ARG for an
+   explanation of the parallel form. Note that the callee will populate every
+   location specified in the parallel, but if the first element of the
+   parallel contains the whole return value, callers will use that element as
+   the canonical location and ignore the others. The m68k port uses this type
+   of parallel to return pointers in both '%a0' (the canonical location) and
+   '%d0'.
+
+   If TARGET_PROMOTE_FUNCTION_RETURN returns true, you must apply the same
+   promotion rules specified in PROMOTE_MODE if valtype is a scalar type.
+
+   If the precise function being called is known, "func" is a tree node
+   (FUNCTION_DECL) for it; otherwise, "func" is a null pointer. This makes it
+   possible to use a different value-returning convention for specific
+   functions when all their calls are known.
+
+   Some target machines have "register windows" so that the register in which
+   a function returns its value is not the same as the one in which the caller
+   sees the value. For such machines, you should return different RTX
+   depending on outgoing.
+
+   TARGET_FUNCTION_VALUE is not used for return values with aggregate data
+   types, because these are returned in another way. See
+   TARGET_STRUCT_VALUE_RTX and related macros.
+
+   For the OR1K, we can just use the result of LIBCALL_VALUE, since all
+   functions return their result in the same place (register rv = r11).
+
+   JPB 30-Aug-10: What about 64-bit scalar returns (long long int, double),
+   which also use rvh (=r12)?
+
+   @param[in] ret_type  The return type of the function.
+   @param[in] func      Tree representing function being called.
+   @param[in] outgoing  Non-zero (TRUE) if the result represents where the
+                        function places the results, zero (FALSE) if the
+                        result represents where the caller sees the result.
+
+   @return  A RTX representing where the result can be found. */
+/* -------------------------------------------------------------------------- */
+static rtx
+or1k_function_value (const_tree ret_type,
+                     const_tree func ATTRIBUTE_UNUSED,
+                     bool outgoing ATTRIBUTE_UNUSED)
+{
+  /* Same location for caller and callee, so func/outgoing are ignored.  */
+  return LIBCALL_VALUE (TYPE_MODE(ret_type));
+
+} /* or1k_function_value () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Check if a function is suitable for tail call optimization.
+
+   True if it is OK to do sibling call optimization for the specified call
+   expression "exp". "decl" will be the called function, or NULL if this is an
+   indirect call.
+
+   It is not uncommon for limitations of calling conventions to prevent tail
+   calls to functions outside the current unit of translation, or during PIC
+   compilation. The hook is used to enforce these restrictions, as the sibcall
+   md pattern can not fail, or fall over to a "normal" call. The criteria for
+   successful sibling call optimization may vary greatly between different
+   architectures.
+
+   For the OR1K, we currently don't allow sibcalls.
+
+   @param[in] decl  The function for which we may optimize
+   @param[in] exp   The call expression which is candidate for optimization.
+
+   @return  Non-zero (TRUE) if sibcall optimization is permitted, zero (FALSE)
+            otherwise. */
+/* -------------------------------------------------------------------------- */
+static bool
+or1k_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
+                              tree exp ATTRIBUTE_UNUSED)
+{
+  /* Sibling-call optimization is not currently supported on OR1K; use the
+     bool literal to match the declared return type.  */
+  return false;
+} /* or1k_function_ok_for_sibcall () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Should an argument be passed by reference.
+
+ This target hook should return true if an argument at the position
+ indicated by "cum" should be passed by reference. This predicate is queried
+ after target independent reasons for being passed by reference, such as
+ TREE_ADDRESSABLE ("type").
+
+ If the hook returns TRUE, a copy of that argument is made in memory and a
+ pointer to the argument is passed instead of the argument itself. The
+ pointer is passed in whatever way is appropriate for passing a pointer to
+ that type.
+
+ For the OR1K, all aggregates and arguments greater than 8 bytes are passed
+ this way.
+
+ @param[in] cum Position of argument under consideration.
+ @param[in] mode Not sure what this relates to.
+ @param[in] type Type of the argument.
+ @param[in] named Not sure what this relates to.
+
+ @return Non-zero (TRUE) if the argument should be passed by reference,
+ zero (FALSE) otherwise. */
+/* -------------------------------------------------------------------------- */
+static bool
+or1k_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ return (type && (AGGREGATE_TYPE_P (type) || int_size_in_bytes (type) > 8));
+
+} /* or1k_pass_by_reference () */
+
+
+int
+or1k_initial_elimination_offset(int from, int to)
+{
+ or1k_compute_frame_size (get_frame_size ());
+ return ((from == FRAME_POINTER_REGNUM
+ ? frame_info.gpr_offset : frame_info.gpr_frame)
+ + (to == STACK_POINTER_REGNUM ? frame_info.late_frame : 0));
+}
+
+
+/* -------------------------------------------------------------------------- */
+/*!How many bytes at the beginning of an argument must be put into registers.
+
+ This target hook returns the number of bytes at the beginning of an
+ argument that must be put in registers. The value must be zero for
+ arguments that are passed entirely in registers or that are entirely pushed
+ on the stack.
+
+ On some machines, certain arguments must be passed partially in registers
+ and partially in memory. On these machines, typically the first few words
+ of arguments are passed in registers, and the rest on the stack. If a
+ multi-word argument (a double or a structure) crosses that boundary, its
+ first few words must be passed in registers and the rest must be
+ pushed. This macro tells the compiler when this occurs, and how many bytes
+ should go in registers.
+
+ FUNCTION_ARG for these arguments should return the first register to be
+ used by the caller for this argument; likewise FUNCTION_INCOMING_ARG, for
+ the called function.
+
+ On the OR1K we never split arguments between registers and memory.
+
+ JPB 30-Aug-10: Is this correct? Surely we should allow this. The ABI spec
+ is incomplete on this point.
+
+ @param[in] cum Position of argument under consideration.
+ @param[in] mode Not sure what this relates to.
+ @param[in] type Type of the argument.
+ @param[in] named Not sure what this relates to.
+
+ @return The number of bytes of the argument to go into registers */
+/* -------------------------------------------------------------------------- */
+static int
+or1k_arg_partial_bytes (cumulative_args_t cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ tree type ATTRIBUTE_UNUSED,
+ bool named ATTRIBUTE_UNUSED)
+{
+ return 0;
+
+} /* or1k_arg_partial_bytes () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Promote the mode of a function's arguments/return value.
+
+ Like PROMOTE_MODE, but it is applied to outgoing function arguments or
+ function return values. The target hook should return the new mode and
+ possibly change "*punsignedp" if the promotion should change
+ signedness. This function is called only for scalar or pointer types.
+
+ "for_return" allows to distinguish the promotion of arguments and return
+ values. If it is 1, a return value is being promoted and
+ TARGET_FUNCTION_VALUE must perform the same promotions done here. If it is
+ 2, the returned mode should be that of the register in which an incoming
+ parameter is copied, or the outgoing result is computed; then the hook
+ should return the same mode as PROMOTE_MODE, though the signedness may be
+ different.
+
+ The default is to not promote arguments and return values. You can also
+ define the hook to "default_promote_function_mode_always_promote" if you
+ would like to apply the same rules given by PROMOTE_MODE.
+
+ For the OR1K, if the size of the mode is integral and less than 4, we
+ promote to SImode, otherwise we return the mode we are supplied.
+
+ @param[in] type Not sure. Type of the argument?
+ @param[in] mode The mode of argument/return value to consider.
+ @param[out] punsignedp Signedness of the value.
+ @param[in] fntype Not sure. Type of the function?
+ @param[in] for_return 1 if a return value, 2 if an incoming value.
+
+ @return The new mode. */
+/* -------------------------------------------------------------------------- */
+static enum machine_mode
+or1k_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
+ enum machine_mode mode,
+ int *punsignedp ATTRIBUTE_UNUSED,
+ const_tree fntype ATTRIBUTE_UNUSED,
+ int for_return ATTRIBUTE_UNUSED)
+{
+ return ( (GET_MODE_CLASS (mode) == MODE_INT)
+ && (GET_MODE_SIZE (mode) < 4)) ? SImode : mode;
+
+} /* or1k_promote_function_mode () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Is this a legitimate address?
+
+ A function that returns whether x (an RTX) is a legitimate memory address on
+ the target machine for a memory operand of mode mode.
+
+ Legitimate addresses are defined in two variants: a strict variant and a
+ non-strict one. The strict parameter chooses which variant is desired by
+ the caller.
+
+ The strict variant is used in the reload pass. It must be defined so that
+ any pseudo-register that has not been allocated a hard register is
+ considered a memory reference. This is because in contexts where some kind
+ of register is required, a pseudo-register with no hard register must be
+ rejected. For non-hard registers, the strict variant should look up the
+ reg_renumber array; it should then proceed using the hard register number in
+ the array, or treat the pseudo as a memory reference if the array holds -1.
+
+ The non-strict variant is used in other passes. It must be defined to accept
+ all pseudo-registers in every context where some kind of register is
+ required.
+
+ Normally, constant addresses which are the sum of a symbol_ref and an
+ integer are stored inside a const RTX to mark them as constant. Therefore,
+ there is no need to recognize such sums specifically as legitimate
+ addresses. Normally you would simply recognize any const as legitimate.
+
+ Usually PRINT_OPERAND_ADDRESS is not prepared to handle constant sums that
+ are not marked with const. It assumes that a naked plus indicates
+ indexing. If so, then you must reject such naked constant sums as
+ illegitimate addresses, so that none of them will be given to
+ PRINT_OPERAND_ADDRESS.
+
+ On some machines, whether a symbolic address is legitimate depends on the
+ section that the address refers to. On these machines, define the target
+ hook TARGET_ENCODE_SECTION_INFO to store the information into the
+ symbol_ref, and then check for it here. When you see a const, you will have
+ to look inside it to find the symbol_ref in order to determine the
+ section. See the internals manual section on "Assembler Format" for more
+ info.
+
+ Some ports are still using a deprecated legacy substitute for this hook, the
+ GO_IF_LEGITIMATE_ADDRESS macro. This macro has this syntax:
+
+ #define GO_IF_LEGITIMATE_ADDRESS (mode, x, label )
+
+ and should goto label if the address x is a valid address on the target
+ machine for a memory operand of mode mode. Whether the strict or non-strict
+ variants are desired is defined by the REG_OK_STRICT macro introduced
+ earlier in this section. Using the hook is usually simpler because it limits
+ the number of files that are recompiled when changes are made.
+
+ The OR1K only has a single addressing mode, which is a base register with
+ 16-bit displacement. We can accept just 16-bit constants as addresses (they
+ can use r0 as base address), and we can accept plain registers as addresses
+ (they can use a displacement of zero).
+
+ @param[in] mode The mode of the address
+ @param[in] x The address (RTX)
+ @param[in] strict Non-zero (TRUE) if we are in "strict" mode, zero (FALSE)
+ otherwise.
+
+ @return Non-zero (TRUE) if this is a legitimate address, zero (FALSE)
+ otherwise. */
+/* -------------------------------------------------------------------------- */
+static bool
+or1k_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx x,
+ bool strict)
+{
+ /* You might think 16-bit constants are suitable. They can be built into
+ addresses using r0 as the base. However this seems to lead to defective
+ code. So for now this is a placeholder, and this code is not used.
+
+ if (or1k_legitimate_displacement_p (mode, x))
+ {
+ return 1;
+ }
+ */
+ /* Addresses consisting of a register and 16-bit displacement are also
+ suitable. We need the mode, since for double words, we had better be
+ able to address the full 8 bytes. */
+ if (GET_CODE(x) == PLUS)
+ {
+ rtx reg = XEXP(x,0);
+
+ /* If valid register... */
+ if ((GET_CODE(reg) == REG)
+ && or1k_regnum_ok_for_base_p (REGNO (reg), strict))
+ {
+ rtx offset = XEXP(x,1);
+
+ /* ...and valid offset */
+ if (or1k_legitimate_displacement_p (mode, offset))
+ {
+ return 1;
+ }
+ }
+ }
+
+ /* Addresses consisting of just a register are OK. They can be built into
+ addresses using an offset of zero (and an offset of four if double
+ word). */
+ if (GET_CODE(x) == REG
+ && or1k_regnum_ok_for_base_p(REGNO(x),strict)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/*!Initialize a trampoline for nested functions.
+
+ A nested function is defined by *two* pieces of information, the address of
+ the function (like any other function) and a pointer to the frame of the
+ enclosing function. The latter is required to allow the nested function to
+ access local variables in the enclosing function's frame.
+
+ This represents a problem, since a function in C is represented as an
+ address that can be held in a single variable as a pointer. Requiring two
+ pointers will not fit.
+
+ The solution is documented in "Lexical Closures for C++" by Thomas
+ M. Breuel (USENIX C++ Conference Proceedings, October 17-21, 1988). The
+ nested function is represented by a small block of code and data on the
+ enclosing function's stack frame, which sets up a pointer to the enclosing
+ function's stack frame (the static chain pointer) in a register defined by
+ the ABI, and then jumps to the code of the function proper.
+
+ The function can be represented as a single pointer to this block of code,
+ known as a trampoline, which when called generates both pointers
+ needed. The nested function (which knows it is a nested function at compile
+ time) can then generate code to access the enclosing frame via the static
+ chain register.
+
+ There is a catch that the trampoline is set up as data, but executed as
+ instructions. The former will be via the data cache, the latter via the
+ instruction cache. There is a risk that a later trampoline will not be seen
+ by the instruction cache, so the wrong code will be executed. So the
+ instruction cache should be flushed for the trampoline address range.
+
+ This hook is called to initialize a trampoline. "m_tramp" is an RTX for the
+ memory block for the trampoline; "fndecl" is the FUNCTION_DECL for the
+ nested function; "static_chain" is an RTX for the static chain value that
+ should be passed to the function when it is called.
+
+ If the target defines TARGET_ASM_TRAMPOLINE_TEMPLATE, then the first thing
+ this hook should do is emit a block move into "m_tramp" from the memory
+ block returned by assemble_trampoline_template. Note that the block move
+ need only cover the constant parts of the trampoline. If the target
+ isolates the variable parts of the trampoline to the end, not all
+ TRAMPOLINE_SIZE bytes need be copied.
+
+ If the target requires any other actions, such as flushing caches or
+ enabling stack execution, these actions should be performed after
+ initializing the trampoline proper.
+
+ For the OR1K, no static chain register is used. We choose to use the return
+ value (rv) register. The code is based on that for MIPS.
+ The trampoline code is:
+
+ l.movhi r11,hi(end_addr)
+ l.ori r11,r11,lo(end_addr)
+ l.lwz r13,4(r11)
+ l.jr r13
+ l.lwz r11,0(r11)
+ end_addr:
+ .word
+ .word
+
+ @note For the OR1K we need to flush the instruction cache, which is a
+ privileged operation. Needs fixing.
+
+ @param[in] m_tramp The lowest address of the trampoline on the stack.
+ @param[in] fndecl Declaration of the enclosing function.
+ @param[in] chain_value Static chain pointer to pass to the nested
+ function. */
+/* -------------------------------------------------------------------------- */
+static void
+or1k_trampoline_init (rtx m_tramp,
+ tree fndecl,
+ rtx chain_value)
+{
+ rtx addr; /* Start address of the trampoline */
+ rtx end_addr; /* End address of the code block */
+
+ rtx high; /* RTX for the high part of end_addr */
+ rtx low; /* RTX for the low part of end_addr */
+ rtx opcode; /* RTX for generated opcodes */
+ rtx mem; /* RTX for trampoline memory */
+
+ rtx *trampoline; /* The trampoline code */
+
+ unsigned int i; /* Index into trampoline */
+ unsigned int j; /* General counter */
+
+ HOST_WIDE_INT end_addr_offset; /* Offset to end of code */
+ HOST_WIDE_INT static_chain_offset; /* Offset to stack chain word */
+ HOST_WIDE_INT target_function_offset; /* Offset to func address word */
+
+ /* Work out the offsets of the pointers from the start of the trampoline
+ code. */
+ trampoline = (rtx*) alloca (or1k_trampoline_code_words() * sizeof(rtx));
+ end_addr_offset = or1k_trampoline_code_size ();
+ static_chain_offset = end_addr_offset;
+ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
+
+ /* Get pointers in registers to the beginning and end of the code block. */
+ addr = force_reg (Pmode, XEXP (m_tramp, 0));
+ end_addr = or1k_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset));
+
+ /* Build up the code in TRAMPOLINE.
+
+ l.movhi r11,hi(end_addr)
+ l.ori r11,r11,lo(end_addr)
+ l.lwz r13,4(r11)
+ l.jr r13
+ l.lwz r11,0(r11)
+ end_addr:
+ */
+
+ i = 0;
+
+ /* Break out the high and low parts of the end_addr */
+ high = expand_simple_binop (SImode, LSHIFTRT, end_addr, GEN_INT (16),
+ NULL, false, OPTAB_WIDEN);
+ low = convert_to_mode (SImode, gen_lowpart (HImode, end_addr), true);
+
+ /* Emit the l.movhi, adding an operation to OR in the high bits from the
+ RTX. */
+ opcode = gen_int_mode (OR1K_MOVHI (11, 0), SImode);
+ trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, high, NULL,
+ false, OPTAB_WIDEN);
+
+ /* Emit the l.ori, adding an operations to OR in the low bits from the
+ RTX. */
+ opcode = gen_int_mode (OR1K_ORI (11, 11, 0), SImode);
+ trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low, NULL,
+ false, OPTAB_WIDEN);
+
+ /* Emit the l.lwz of the function address. No bits to OR in here, so we can
+ do the opcode directly. */
+ trampoline[i++] =
+ gen_int_mode (OR1K_LWZ (13, 11, target_function_offset - end_addr_offset),
+ SImode);
+
+ if (TARGET_DELAY_ON) {
+ /* Emit the l.jr of the function. No bits to OR in here, so we can do the
+ opcode directly. */
+ trampoline[i++] = gen_int_mode (OR1K_JR (13), SImode);
+
+ /* Emit the l.lwz of the static chain. No bits to OR in here, so we can
+ do the opcode directly. */
+ trampoline[i++] =
+ gen_int_mode (OR1K_LWZ (STATIC_CHAIN_REGNUM, 11,
+ static_chain_offset - end_addr_offset), SImode);
+ } else {
+ trampoline[i++] =
+ gen_int_mode (OR1K_LWZ (STATIC_CHAIN_REGNUM, 11,
+ static_chain_offset - end_addr_offset), SImode);
+ trampoline[i++] = gen_int_mode (OR1K_JR (13), SImode);
+ if (TARGET_DELAY_COMPAT)
+ trampoline[i++] = gen_int_mode (OR1K_NOP, SImode);
+ }
+
+ /* Copy the trampoline code. Leave any padding uninitialized. */
+ for (j = 0; j < i; j++)
+ {
+ mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode));
+ or1k_emit_move (mem, trampoline[j]);
+ }
+
+ /* Set up the static chain pointer field. */
+ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
+ or1k_emit_move (mem, chain_value);
+
+ /* Set up the target function field. */
+ mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
+ or1k_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
+
+ /* Flushing the trampoline from the instruction cache needs to be done
+ here. */
+
+} /* or1k_trampoline_init () */
+
+
+/* -------------------------------------------------------------------------- */
+/*!Provide support for DW_AT_calling_convention
+
+ Define this to enable the dwarf attribute DW_AT_calling_convention to be
+ emitted for each function. Instead of an integer return the enum value for
+ the DW_CC_ tag.
+
+ To support optional call frame debugging information, you must also define
+ INCOMING_RETURN_ADDR_RTX and either set RTX_FRAME_RELATED_P on the prologue
+ insns if you use RTL for the prologue, or call "dwarf2out_def_cfa" and
+ "dwarf2out_reg_save" as appropriate from TARGET_ASM_FUNCTION_PROLOGUE if
+ you don’t.
+
+ For the OR1K, it should be sufficient to return DW_CC_normal in all cases.
+
+ @param[in] function The function requiring debug information
+
+ @return The enum of the DW_CC tag. */
+/* -------------------------------------------------------------------------- */
+static int
+or1k_dwarf_calling_convention (const_tree function ATTRIBUTE_UNUSED)
+{
+ return DW_CC_normal;
+
+} /* or1k_dwarf_calling_convention () */
+
+/* ========================================================================== */
+/* Target hook initialization.
+
+ In most cases these use the static functions declared above. They have
+ defaults, so must be undefined first, before being redefined.
+
+ The description of what they do is found with the function above, unless it
+ is a standard function or a constant, in which case it is defined here (as
+ with TARGET_ASM_NAMED_SECTION).
+
+ The final declaration is of the global "targetm" structure. */
+
+/* Output assembly directives to switch to section name. The section should
+ have attributes as specified by flags, which is a bit mask of the SECTION_*
+ flags defined in ‘output.h’. If decl is non-NULL, it is the VAR_DECL or
+ FUNCTION_DECL with which this section is associated.
+
+ For OR1K, we use the default ELF sectioning. */
+#undef TARGET_ASM_NAMED_SECTION
+#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE or1k_function_value
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL or1k_function_ok_for_sibcall
+
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE or1k_pass_by_reference
+
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES or1k_arg_partial_bytes
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE or1k_option_override
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START or1k_asm_file_start
+
+/* This target hook returns TRUE if an argument declared in a prototype as an
+ integral type smaller than int should actually be passed as an int. In
+ addition to avoiding errors in certain cases of mismatch, it also makes for
+ better code on certain machines.
+
+ The default is to not promote prototypes.
+
+ For the OR1K we do require this, so use a utility hook, which always
+ returns TRUE. */
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
+
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE or1k_promote_function_mode
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P or1k_legitimate_address_p
+
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS or1k_legitimize_address
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT or1k_trampoline_init
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM or1k_cannot_force_const_mem
+
+#undef TARGET_DWARF_CALLING_CONVENTION
+#define TARGET_DWARF_CALLING_CONVENTION or1k_dwarf_calling_convention
+
+/* uClibc has some instances where (non-conforming to ISO C) a non-varargs
+ prototype is in scope when calling that function which is implemented
+ as varargs. We want this to work at least where none of the anonymous
+ arguments are used. I.e. we want the last named argument to be known
+ as named so it can be passed in a register, varargs function or not. */
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
+
+/* Is this suitable for an immediate operand.
+
+ JPB 1-Sep-10: Is this correct? We can only do 16-bit immediates directly. */
+static bool
+or1k_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+ if (or1k_tls_symbolic_operand (x) != TLS_MODEL_NONE)
+ return 0;
+
+ return GET_CODE(x) != CONST_DOUBLE || (GET_MODE (x) == VOIDmode && !flag_pic);
+}
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P or1k_legitimate_constant_p
+
+/* On the OR1K, no functions pop their arguments.
+ JPB 29-Aug-10: Is this really correct? */
+static int
+or1k_return_pops_args (tree ARG_UNUSED(fundecl), tree ARG_UNUSED(funtype), int ARG_UNUSED(size))
+{
+ return 0;
+}
+#undef TARGET_RETURN_POPS_ARGS
+#define TARGET_RETURN_POPS_ARGS or1k_return_pops_args
+
+/* Determine where to put an argument to a function. Value is zero to push
+ the argument on the stack, or a hard register in which to store the
+ argument.
+
+ "mode" is the argument's machine mode.
+
+ "type" is the data type of the argument (as a tree). This is null for
+ libcalls where that information may not be available.
+
+ "cum" is a variable of type CUMULATIVE_ARGS which gives info about the
+ preceding args and about the function being called.
+
+ "named" is nonzero if this argument is a named parameter (otherwise it is
+ an extra parameter matching an ellipsis).
+
+ On the ARC the first MAX_ARC_PARM_REGS args are normally in registers and
+ the rest are pushed. */
+static rtx
+or1k_function_arg (cumulative_args_t cum, enum machine_mode mode,
+ const_tree type, bool named)
+{
+ CUMULATIVE_ARGS *cum_pnt = get_cumulative_args (cum);
+
+ if (OR1K_PASS_IN_REG_P (*cum_pnt, mode, type, named))
+ return gen_rtx_REG (mode, OR1K_ROUND_ADVANCE_CUM (*cum_pnt, mode, type)
+ + GP_ARG_MIN_REG);
+ else
+ return 0;
+}
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG or1k_function_arg
+/* Update the data in "cum" to advance over an argument of mode "mode" and
+ data type "type". ("type" is null for libcalls where that information may
+ not be available.) */
+static void
+or1k_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
+ const_tree type, bool ARG_UNUSED(named))
+{
+ CUMULATIVE_ARGS *cum_pnt = get_cumulative_args (cum);
+
+ *cum_pnt = OR1K_ROUND_ADVANCE_CUM (*cum_pnt, mode, type)
+ + OR1K_ROUND_ADVANCE_ARG (mode, type);
+}
+
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE or1k_function_arg_advance
+
+#undef TARGET_PRINT_OPERAND_ADDRESS
+#define TARGET_PRINT_OPERAND_ADDRESS or1k_print_operand_address
+
+/* Trampoline stubs are yet to be written. */
+/* #define TARGET_ASM_TRAMPOLINE_TEMPLATE */
+/* #define TARGET_TRAMPOLINE_INIT */
+
+/* Lay out structs with increased alignment so that they can be accessed
+ more efficiently. But don't increase the size of one or two byte
+ structs. */
+int
+or1k_struct_alignment (tree t)
+{
+ unsigned HOST_WIDE_INT total = 0;
+ int default_align_fields = 0;
+ int special_align_fields = 0;
+ tree field;
+ unsigned max_align
+ = maximum_field_alignment ? maximum_field_alignment : BIGGEST_ALIGNMENT;
+ bool struct_p;
+
+ switch (TREE_CODE (t))
+ {
+ case RECORD_TYPE:
+ struct_p = true; break;
+ case UNION_TYPE: case QUAL_UNION_TYPE:
+ struct_p = false; break;
+ default: gcc_unreachable ();
+ }
+ /* Skip all non field decls */
+ for (field = TYPE_FIELDS (t); field; field = TREE_CHAIN (field))
+ {
+ unsigned HOST_WIDE_INT field_size;
+
+ if (TREE_CODE (field) != FIELD_DECL ||
+ TREE_TYPE (field) == error_mark_node)
+ continue;
+ /* If this is a field in a non-qualified union, or the sole field in
+ a struct, and the alignment was set by the user, don't change the
+ alignment.
+ If the field is a struct/union in a non-qualified union, we already
+ had sufficient opportunity to pad it - if we didn't, that'd be
+ because the alignment was set as above.
+ Likewise if the field is a struct/union and the sole field in a
+ struct. */
+ if (DECL_USER_ALIGN (field)
+ || TYPE_USER_ALIGN (TREE_TYPE (field))
+ || TREE_CODE (TREE_TYPE (field)) == UNION_TYPE
+ || TREE_CODE (TREE_TYPE (field)) == QUAL_UNION_TYPE
+ || TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
+ {
+ if (TREE_CODE (t) == UNION_TYPE)
+ return 0;
+ special_align_fields++;
+ }
+ else if (DECL_PACKED (field))
+ special_align_fields++;
+ else
+ default_align_fields++;
+ if (!host_integerp (DECL_SIZE_UNIT (field), 1))
+ field_size = max_align;
+ else
+ field_size = tree_low_cst (DECL_SIZE_UNIT (field), 1);
+ if (field_size >= BIGGEST_ALIGNMENT)
+ total = max_align;
+ if (struct_p)
+ total += field_size;
+ else
+ total = MAX (total, field_size);
+ }
+
+ if (!default_align_fields
+ && (TREE_CODE (t) != RECORD_TYPE || special_align_fields <= 1))
+ return 0;
+ return total < max_align ? (1U << ceil_log2 (total)) : max_align;
+}
+
+/* Increase the alignment of objects so that they are easier to copy.
+ Note that this can cause more struct copies to be inlined, so code
+ size might increase, but so should performance. */
+int
+or1k_data_alignment (tree t, int align)
+{
+ if (align < FASTEST_ALIGNMENT && TREE_CODE (t) == ARRAY_TYPE)
+ {
+ int size = int_size_in_bytes (t);
+
+ return (size > 0 && size < FASTEST_ALIGNMENT / BITS_PER_UNIT
+ ? (1 << floor_log2 (size)) * BITS_PER_UNIT
+ : FASTEST_ALIGNMENT);
+ }
+ return align;
+}
+
+static void
+or1k_option_override (void)
+{
+ if (!TARGET_DELAY_ON)
+ flag_delayed_branch = FALSE;
+}
+
+static void
+or1k_asm_file_start(void)
+{
+ default_file_start();
+
+ if (TARGET_DELAY_OFF) {
+ fprintf(asm_out_file, "\t.nodelay\n");
+ }
+}
+
+/* Implement EH_RETURN_HANDLER_RTX.
+ * Make eh_return use the link register. Epilogue LR restore
+ * is suppressed for eh_return. */
+rtx
+or1k_eh_return_handler_rtx (void)
+{
+ return INCOMING_RETURN_ADDR_RTX;
+}
+
+/* Implement RETURN_ADDR_RTX.
+ * We do not support moving back to a previous frame. */
+rtx
+or1k_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
+{
+ if (count != 0)
+ return const0_rtx;
+
+ /* We don't know if LR is going to be saved or if we're going to
+ * be clobbering it with the GOT instruction.
+ * Therefore the safest bet is to force a save of LR and use that.
+ * Assume it's going to be first in the stack. */
+
+ cfun->machine->force_lr_save = true;
+ return gen_rtx_MEM (Pmode, plus_constant (Pmode, arg_pointer_rtx,
+ -UNITS_PER_WORD));
+}
+
+/* Implement TARGET_FRAME_POINTER_REQUIRED.
+ * We want frame pointer in eh_return and when alloca is used */
+static bool
+or1k_frame_pointer_required (void)
+{
+ return crtl->calls_eh_return || cfun->calls_alloca;
+}
+
+/* Functions to save and restore machine-specific function data. */
+static struct machine_function *
+or1k_init_machine_status (void)
+{
+ return ggc_alloc_cleared_machine_function ();
+}
+
+void
+or1k_init_expanders (void)
+{
+ /* Arrange to initialize and mark the machine per-function
+ * status. */
+ init_machine_status = or1k_init_machine_status;
+
+ if (cfun && cfun->machine)
+ {
+ cfun->machine->force_lr_save = false;
+ }
+}
+
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED or1k_frame_pointer_required
+
+/* Initialize the GCC target structure. */
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-or1k.h"
diff -rNU3 dist.orig/gcc/config/or1k/or1k.h dist/gcc/config/or1k/or1k.h
--- dist.orig/gcc/config/or1k/or1k.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/or1k.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,1202 @@
+/* Definitions of target machine for GNU compiler. OpenRISC 1000 version.
+ Copyright (C) 1987, 1988, 1992, 1995, 1996, 1999, 2000, 2001, 2002,
+ 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2010 Embecosm Limited
+ Contributed by Damjan Lampret in 1999.
+ Major optimizations by Matjaz Breskvar in 2005.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 1, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef _OR1K_H_
+#define _OR1K_H_
+
+#include "config/or1k/or1k-opts.h"
+
+/* Target CPU builtins */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ if (TARGET_DELAY_OFF) { \
+ builtin_define ("__OR1KND__"); \
+ builtin_define ("__or1knd__"); \
+ builtin_assert ("cpu=or1knd"); \
+ builtin_assert ("machine=or1knd"); \
+ } else { \
+ builtin_define ("__OR1K__"); \
+ builtin_define ("__or1k__"); \
+ builtin_assert ("cpu=or1k"); \
+ builtin_assert ("machine=or1k"); \
+ } \
+ if (TARGET_DELAY_ON) { \
+ builtin_define ("__OR1K_DELAY__"); \
+ } else if (TARGET_DELAY_OFF) { \
+ builtin_define ("__OR1K_NODELAY__"); \
+ } else if (TARGET_DELAY_COMPAT) { \
+ builtin_define ("__OR1K_DELAY_COMPAT__"); \
+ } \
+ } \
+ while (0)
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{!mnewlib:%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}}"
+
+/* Make sure we pick up the crti.o, crtbegin.o, crtend.o and crtn.o files. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared:%{pie:Scrt0.o%s;:crt0.o%s}} crti.o%s \
+ %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{mnewlib:-entry 0x100} %{static:-static} %{shared:-shared}"
+
+/* Override previous definitions (linux.h). Newlib doesn't have a profiling
+ version of the library, but it does have a debugging version (libg.a) */
+#undef LIB_SPEC
+#define LIB_SPEC "%{!mnewlib:%{pthread:-lpthread} \
+ %{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}}" \
+ "%{mnewlib:%{!g:-lc} %{g:-lg} -lor1k \
+ %{mboard=*:-lboard-%*} %{!mboard=*:-lboard-or1ksim} \
+ %{!g:-lc} %{g:-lg} \
+ }"
+
+#define SUBTARGET_EXTRA_SPECS
+
+#define EXTRA_SPECS \
+ SUBTARGET_EXTRA_SPECS
+
+/* Target machine storage layout */
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields.
+ This is not true on the or1k. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+#define BYTES_BIG_ENDIAN 1
+
+/* Define this if most significant word of a multiword number is the lowest numbered. */
+#define WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_WORD 32
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE 32
+
+/* Allocation boundary (in *bits*) for storing pointers in memory. */
+#define POINTER_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 8
+
+/* A bitfield declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 32
+
+/* The best alignment to use in cases where we have a choice. */
+#define FASTEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+/*
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST || TREE_CODE (EXP) == CONSTRUCTOR) \
+ && (ALIGN) < FASTEST_ALIGNMENT \
+ ? FASTEST_ALIGNMENT : (ALIGN))
+*/
+
+/* One use of this macro is to increase alignment of medium-size
+ data to make it all fit in fewer cache lines. Another is to
+ cause character arrays to be word-aligned so that `strcpy' calls
+ that copy constants to character arrays can be done inline. */
+/*
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ ((((ALIGN) < FASTEST_ALIGNMENT) \
+ && (TREE_CODE (TYPE) == ARRAY_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE \
+ || TREE_CODE (TYPE) == RECORD_TYPE)) ? FASTEST_ALIGNMENT : (ALIGN))
+*/ /* CHECK - btw code gets bigger with this one */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ ((ALIGN) < FASTEST_ALIGNMENT \
+ ? or1k_data_alignment ((TYPE), (ALIGN)) : (ALIGN))
+
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
+ ((ALIGN) < FASTEST_ALIGNMENT \
+ ? or1k_data_alignment ((TYPE), (ALIGN)) : (ALIGN))
+
+/* Define this if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1 /* CHECK */
+
+/* Align an address */
+#define OR1K_ALIGN(n,a) (((n) + (a) - 1) & ~((a) - 1))
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS /* CHECK */
+
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ (MODE) = SImode;
+ /* CHECK */
+
+
+/*
+ * brings 0.4% improvement in static size for linux
+ *
+#define PROMOTE_FOR_CALL_ONLY
+*/
+
+/* Define this macro if it is as good or better to call a constant
+ function address than to call an address kept in a register. */
+#define NO_FUNCTION_CSE 1 /* check */
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers. */
+
+#define OR1K_LAST_ACTUAL_REG 31
+#define ARG_POINTER_REGNUM (OR1K_LAST_ACTUAL_REG + 1)
+#define FRAME_POINTER_REGNUM (ARG_POINTER_REGNUM + 1)
+#define OR1K_LAST_INT_REG FRAME_POINTER_REGNUM
+#define OR1K_FLAGS_REG (OR1K_LAST_INT_REG + 1)
+#define FIRST_PSEUDO_REGISTER (OR1K_FLAGS_REG + 1)
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+ On the or1k, these are r1 as stack pointer and
+ r2 as frame/arg pointer. r9 is link register, r0
+ is zero, r10 is linux thread and r16 is got pointer */
+#define FIXED_REGISTERS { \
+ 1, 1, 0, 0, 0, 0, 0, 0, \
+ 0, 1, 1, 0, 0, 0, 0, 0, \
+ 1, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1 }
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+#define CALL_USED_REGISTERS { \
+ 1, 1, 0, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 0, 1, \
+ 1, 1, 0, 1, 0, 1, 0, 1, \
+ 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1}
+
+/* stack pointer: must be FIXED and CALL_USED */
+/* hard frame pointer: must be call saved. */
+/* soft frame pointer / arg pointer: must be FIXED and CALL_USED */
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+ On the or1k, all registers are one word long. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) 1
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) 1
+
+/* A C expression for the cost of moving data of mode mode from a register in
+ class "from" to one in class "to". The classes are expressed using the
+ enumeration values such as GENERAL_REGS. A value of 2 is the default; other
+ values are interpreted relative to that.
+
+ It is not required that the cost always equal 2 when "from" is the same as
+ "to"; on some machines it is expensive to move between registers if they are
+ not general registers.
+
+ If reload sees an insn consisting of a single set between two hard
+ registers, and if REGISTER_MOVE_COST applied to their classes returns a
+ value of 2, reload does not check to ensure that the constraints of the
+ insn are met. Setting a cost of other than 2 will allow reload to verify
+ that the constraints are met. You should do this if the "movm" pattern's
+ constraints do not allow such copying.
+
+ JPB 31-Aug-10: This is just the default. */
+#define REGISTER_MOVE_COST(mode, from, to) 2
+
+/* A C expression for the cost of moving data of mode mode between a register
+ of class "class" and memory; "in" is zero if the value is to be written to
+ memory, nonzero if it is to be read in. This cost is relative to those in
+ REGISTER_MOVE_COST. If moving between registers and memory is more
+ expensive than between two registers, you should define this macro to
+ express the relative cost.
+
+ If you do not define this macro, GCC uses a default cost of 4 plus the cost
+ of copying via a secondary reload register, if one is needed. If your
+ machine requires a secondary reload register to copy between memory and a
+ register of class but the reload mechanism is more complex than copying via
+ an intermediate, define this macro to reflect the actual cost of the move.
+
+ GCC defines the function "memory_move_secondary_cost" if secondary reloads
+ are needed. It computes the costs due to copying via a secondary
+ register. If your machine copies from memory using a secondary register in
+ the conventional way but the default base value of 4 is not correct for
+ your machine, define this macro to add some other value to the result of
+ that function. The arguments to that function are the same as to this
+ macro.
+
+ JPB 31-Aug-10. Is this really correct? I suppose the OR1K only takes one
+ cycle, notionally, to access memory, but surely that will
+ often stall the pipeline. Needs more investigation. */
+#define MEMORY_MOVE_COST(mode, class, in) 2
+
+/* A C expression for the cost of a branch instruction. A value of 1 is the
+ default; other values are interpreted relative to that. Parameter "speed_p"
+ is TRUE when the branch in question should be optimized for speed. When it
+ is FALSE, BRANCH_COST should be returning value optimal for code size
+   rather than performance considerations. "predictable_p" is true for well
+ predictable branches. On many architectures the BRANCH_COST can be reduced
+ then.
+
+ JPB 31-Aug-10. The original code had the comment that "... this should
+ specify the cost of a branch insn; roughly the number of
+ extra insns that should be added to avoid a branch.
+
+ Set this to 3 on the or1k since that is roughly the average
+ cost of an unscheduled conditional branch.
+
+ Cost of 2 and 3 give equal and ~0.7% bigger binaries
+ respectively."
+
+ This seems ad-hoc. Probably we need some experiments. */
+#define BRANCH_COST(speed_p, predictable_p) 2
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 1
+
+/* Base register for access to local variables of the function. */
+#define HARD_FRAME_POINTER_REGNUM 2
+
+/* Link register. */
+#define LINK_REGNUM 9
+
+/* Register in which static-chain is passed to a function. */
+
+#define STATIC_CHAIN_REGNUM 11
+
+#define PROLOGUE_TMP 13
+#define EPILOGUE_TMP 3
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+/*#define STRUCT_VALUE_REGNUM 0*/
+
+/* Pass address of result struct to callee as "invisible" first argument */
+#define STRUCT_VALUE 0
+
+/* -----------------------[ PHX start ]-------------------------------- */
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+   GENERAL_REGS and BASE_REGS classes are the same on or1k.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The or1k has only one kind of registers, so NO_REGS, GENERAL_REGS
+ and ALL_REGS are the only classes. */
+/* JPB 26-Aug-10: Based on note from Mikhael (mirekez@gmail.com), we don't
+ need CR_REGS and it is in the wrong place for later things! */
+enum reg_class
+{
+ NO_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS" \
+}
+
+/* Define which registers fit in which classes. This is an initializer for a
+ vector of HARD_REG_SET of length N_REG_CLASSES.
+
+ An initializer containing the contents of the register classes, as integers
+ which are bit masks. The Nth integer specifies the contents of class N.
+ The way the integer MASK is interpreted is that register R is in the class
+ if `MASK & (1 << R)' is 1.
+
+ When the machine has more than 32 registers, an integer does not suffice.
+ Then the integers are replaced by sub-initializers, braced groupings
+ containing several integers. Each sub-initializer must be suitable as an
+ initializer for the type `HARD_REG_SET' which is defined in
+ `hard-reg-set.h'.
+
+ For the OR1K we have the minimal set. GENERAL_REGS is all except r0, which
+   is permanently zero. */
+#define REG_CLASS_CONTENTS \
+ { \
+ { 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0xffffffff, 0x00000003 }, /* GENERAL_REGS */ \
+ { 0xffffffff, 0x00000007 } /* ALL_REGS */ \
+ }
+
+/* The same information, inverted:
+
+ Return the class number of the smallest class containing reg number REGNO.
+ This could be a conditional expression or could index an array.
+
+ ??? 0 is not really a register, but a constant. */
+#define REGNO_REG_CLASS(regno) \
+ ((0 == regno) ? ALL_REGS : ((1 <= regno) && (regno <= OR1K_LAST_INT_REG)) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Given an rtx X being reloaded into a reg required to be in class CLASS,
+ return the class of reg to actually use. In general this is just CLASS;
+ but on some machines in some cases it is preferable to use a more
+ restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) (CLASS)
+
+/* Return the maximum number of consecutive registers needed to represent mode
+ MODE in a register of class CLASS.
+
+ On the or1k, this is always the size of MODE in words, since all registers
+ are the same size. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+
+/* -------------------------------------------------------------------------- */
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack makes the stack pointer a
+ smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this if the nominal address of the stack frame is at the
+ high-address end of the local variables; that is, each additional local
+ variable allocated goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at. If
+ FRAME_GROWS_DOWNWARD, this is the offset to the END of the first local
+ allocated. Otherwise, it is the offset to the BEGINNING of the first local
+ allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Define this if stack space is still allocated for a parameter passed
+ in a register. The value is the number of bytes allocated to this
+ area.
+
+ No such allocation for OR1K. */
+/* #define REG_PARM_STACK_SPACE(FNDECL) (UNITS_PER_WORD * GP_ARG_NUM_REG) */
+
+/* Define this if the above stack space is to be considered part of the
+ space allocated by the caller.
+
+ N/a for OR1K. */
+/* #define OUTGOING_REG_PARM_STACK_SPACE */
+
+/* Define this macro if `REG_PARM_STACK_SPACE' is defined, but the
+ stack parameters don't skip the area specified by it.
+
+ N/a for OR1K. */
+/* #define STACK_PARMS_IN_REG_PARM_AREA */
+
+/* If nonzero, the maximum amount of space required for outgoing arguments
+ will be computed and placed into the variable
+ current_function_outgoing_args_size. No space will be pushed onto the stack
+ for each call; instead, the function prologue should increase the stack
+ frame size by this amount.
+
+ Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is not proper.
+
+ This is the approached used by OR1K. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ (OFFSET) = or1k_initial_elimination_offset ((FROM), (TO))
+
+/* Minimum and maximum general purpose registers used to hold arguments. */
+#define GP_ARG_MIN_REG 3
+#define GP_ARG_MAX_REG 8
+#define GP_ARG_NUM_REG (GP_ARG_MAX_REG - GP_ARG_MIN_REG + 1)
+
+/* Return register */
+#define GP_ARG_RETURN 11
+#define GP_ARG_RETURNH 12
+
+/* TLS thread pointer register */
+#define THREAD_PTR_REGNUM 10
+
+/* Position Independent Code. */
+
+#define PIC_OFFSET_TABLE_REGNUM 16
+
+/* A C expression that is nonzero if X is a legitimate immediate
+ operand on the target machine when generating position independent code.
+ You can assume that X satisfies CONSTANT_P, so you need not
+ check this. You can also assume `flag_pic' is true, so you need not
+ check it either. You need not define this macro if all constants
+ (including SYMBOL_REF) can be immediate operands when generating
+ position independent code. */
+#define LEGITIMATE_PIC_OPERAND_P(X) or1k_legitimate_pic_operand_p (X)
+
+/* A C expression to create an RTX representing the place where a library
+ function returns a value of mode mode.
+
+ Note that “library function” in this context means a compiler support
+ routine, used to perform arithmetic, whose name is known specially by the
+ compiler and was not mentioned in the C code being compiled.
+
+ For the OR1K, return value is in R11 (GP_ARG_RETURN). */
+#define LIBCALL_VALUE(mode) \
+ gen_rtx_REG( \
+ ((GET_MODE_CLASS (mode) != MODE_INT \
+ || GET_MODE_SIZE (mode) >= 4) \
+ ? (mode) \
+ : SImode), \
+ GP_ARG_RETURN)
+
+/* Define this if PCC uses the nonreentrant convention for returning
+ structure and union values.
+
+ Not needed for OR1K. */
+/*#define PCC_STATIC_STRUCT_RETURN */
+
+/* A C expression that is nonzero if regno is the number of a hard register in
+ which the values of called function may come back.
+
+ A register whose use for returning values is limited to serving as the
+ second of a pair (for a value of type double, say) need not be recognized
+ by this macro. So for most machines, this definition suffices:
+
+ #define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
+
+ If the machine has register windows, so that the caller and the called
+ function use different registers for the return value, this macro should
+ recognize only the caller's register numbers.
+
+ For OR1K, we must check if we have the return register.
+
+   From GCC 4.6, this will be replaced by TARGET_FUNCTION_VALUE_REGNO_P target
+ hook function. */
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_ARG_RETURN)
+
+/* 1 if N is a possible register number for function argument passing. */
+#define FUNCTION_ARG_REGNO_P(N) \
+ ((N) >= GP_ARG_MIN_REG && (N) <= GP_ARG_MAX_REG)
+
+/* A code distinguishing the floating point format of the target
+ machine. There are three defined values: IEEE_FLOAT_FORMAT,
+ VAX_FLOAT_FORMAT, and UNKNOWN_FLOAT_FORMAT. */
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+/* A C type for declaring a variable that is used as the first argument of
+ FUNCTION_ARG and other related values. For some target machines, the type
+ int suffices and can hold the number of bytes of argument so far.
+
+ There is no need to record in CUMULATIVE_ARGS anything about the arguments
+ that have been passed on the stack. The compiler has other variables to
+ keep track of that. For target machines on which all arguments are passed
+ on the stack, there is no need to store anything in CUMULATIVE_ARGS;
+ however, the data structure must exist and should not be empty, so use
+ int. */
+#define CUMULATIVE_ARGS int
+
+/* A C statement (sans semicolon) for initializing the variable "cum" for the
+ state at the beginning of the argument list. The variable has type
+ CUMULATIVE_ARGS. The value of "fntype" is the tree node for the data type
+ of the function which will receive the args, or 0 if the args are to a
+ compiler support library function. For direct calls that are not libcalls,
+ "fndecl" contain the declaration node of the function. "fndecl" is also set
+ when INIT_CUMULATIVE_ARGS is used to find arguments for the function being
+ compiled. "n_named_args" is set to the number of named arguments,
+ including a structure return address if it is passed as a parameter, when
+ making a call. When processing incoming arguments, "n_named_args" is set to
+   -1.
+
+ When processing a call to a compiler support library function, "libname"
+ identifies which one. It is a symbol_ref rtx which contains the name of the
+ function, as a string. "libname" is 0 when an ordinary C function call is
+ being processed. Thus, each time this macro is called, either "libname" or
+ "fntype" is nonzero, but never both of them at once.
+
+ For the OR1K, we set "cum" to zero each time.
+ JPB 29-Aug-10: Is this correct? */
+#define INIT_CUMULATIVE_ARGS(cum, fntype, libname, fndecl, n_named_args) \
+ (cum = 0)
+
+/* -------------------------------------------------------------------------- */
+/* Define intermediate macro to compute the size (in registers) of an argument
+ for the or1k.
+
+ The OR1K_ROUND_ADVANCE* macros are local to this file. */
+
+/* Round "size" up to a word boundary. */
+#define OR1K_ROUND_ADVANCE(size) \
+ (((size) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Round arg "mode"/"type" up to the next word boundary. */
+#define OR1K_ROUND_ADVANCE_ARG(mode, type) \
+ ((mode) == BLKmode \
+ ? OR1K_ROUND_ADVANCE (int_size_in_bytes (type)) \
+ : OR1K_ROUND_ADVANCE (GET_MODE_SIZE (mode)))
+
+/* The ABI says that no rounding to even or odd words takes place. */
+#define OR1K_ROUND_ADVANCE_CUM(cum, mode, type) (cum)
+
+/* Return boolean indicating if arg of type "type" and mode "mode" will be
+ passed in a reg. This includes arguments that have to be passed by
+ reference as the pointer to them is passed in a reg if one is available
+ (and that is what we're given).
+
+ When passing arguments "named" is always 1. When receiving arguments
+ "named" is 1 for each argument except the last in a stdarg/varargs
+ function. In a stdarg function we want to treat the last named arg as
+ named. In a varargs function we want to treat the last named arg (which is
+ `__builtin_va_alist') as unnamed.
+
+ This macro is only used in this file. */
+#define OR1K_PASS_IN_REG_P(cum, mode, type, named) \
+ ((named) \
+ && ((OR1K_ROUND_ADVANCE_CUM ((cum), (mode), (type)) \
+ + OR1K_ROUND_ADVANCE_ARG ((mode), (type)) \
+ <= GP_ARG_NUM_REG)))
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+#define FUNCTION_PROFILER(FILE, LABELNO)
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, the
+ stack pointer does not matter. The value is tested only in functions that
+ have frame pointers. No definition is equivalent to always zero.
+
+ The default suffices for OR1K. */
+#define EXIT_IGNORE_STACK 0
+
+/* A C expression whose value is RTL representing the location of the
+ incoming return address at the beginning of any function, before the
+ prologue. This RTL is either a REG, indicating that the return
+ value is saved in REG, or a MEM representing a location in
+ the stack. */
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LINK_REGNUM)
+
+#define RETURN_ADDR_RTX or1k_return_addr_rtx
+
+/* Addressing modes, and classification of registers for them. */
+
+/* #define HAVE_POST_INCREMENT */
+/* #define HAVE_POST_DECREMENT */
+
+/* #define HAVE_PRE_DECREMENT */
+/* #define HAVE_PRE_INCREMENT */
+
+/* Macros to check register numbers against specific register classes. */
+#define MAX_REGS_PER_ADDRESS 1
+
+/* True if X is an rtx for a constant that is a valid address.
+
+ JPB 29-Aug-10: Why is the default implementation not OK? */
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST \
+ || GET_CODE (X) == HIGH)
+
+/* A C expression which is nonzero if register number num is suitable for use
+ as a base register in operand addresses. Like TARGET_LEGITIMATE_ADDRESS_P,
+ this macro should also define a strict and a non-strict variant. Both
+ variants behave the same for hard register; for pseudos, the strict variant
+ will pass only those that have been allocated to a valid hard registers,
+ while the non-strict variant will pass all pseudos.
+
+ Compiler source files that want to use the strict variant of this and other
+ macros define the macro REG_OK_STRICT. You should use an #ifdef
+ REG_OK_STRICT conditional to define the strict variant in that case and the
+ non-strict variant otherwise.
+
+ JPB 29-Aug-10: This has been conflated with the old REG_OK_FOR_BASE_P
+ function, which is no longer part of GCC.
+
+ I'm not sure this is right. r0 can be a base register, just
+ it can't get set by the user. */
+#ifdef REG_OK_STRICT
+#define REGNO_OK_FOR_BASE_P(num) \
+ ( ((0 < (num)) && ((num) <= OR1K_LAST_INT_REG)) \
+ || ((0 < reg_renumber[num]) && (reg_renumber[num] <= OR1K_LAST_INT_REG)))
+
+#else
+/* Accept an int register or a pseudo reg.
+
+ JPB 1-Sep-10: Should this allow r0, if the strict version does not? */
+#define REGNO_OK_FOR_BASE_P(num) ((num) <= OR1K_LAST_INT_REG || \
+ (num) >= FIRST_PSEUDO_REGISTER)
+#endif
+
+/* OR1K doesn't have any indexed addressing. */
+#define REG_OK_FOR_INDEX_P(X) 0
+#define REGNO_OK_FOR_INDEX_P(X) 0
+
+
+/* Specify the machine mode that this machine uses for the index in the
+ tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* The maximum number of bytes that a single instruction can move quickly
+ between memory and registers or between two memory locations. */
+#define MOVE_MAX 4
+
+/* Define this if zero-extension is slow (more than one real instruction). */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable.
+ For RISC chips, it means that access to memory by bytes is no
+ better than access by words when possible, so grab a whole word
+ and maybe make use of that. */
+#define SLOW_BYTE_ACCESS 1
+
+/* Define if shifts truncate the shift count
+ which implies one can omit a sign-extension or zero-extension
+ of a shift count. */
+/* #define SHIFT_COUNT_TRUNCATED */
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode SImode
+
+/* A function address in a call instruction
+ is a byte address (for indexing purposes)
+ so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE SImode
+
+
+/* -------------------------------------------------------------------------- */
+/* Condition code stuff */
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison. */
+#define SELECT_CC_MODE(op, x, y) or1k_select_cc_mode(op)
+
+/* Can the condition code MODE be safely reversed? This is safe in
+ all cases on this port, because at present it doesn't use the
+ trapping FP comparisons (fcmpo). */
+#define REVERSIBLE_CC_MODE(mode) 1
+
+/* Given a condition code and a mode, return the inverse condition.
+
+ JPB 31-Aug-10: This seems like the default. Do we even need this? */
+#define REVERSE_CONDITION(code, mode) reverse_condition (code)
+
+
+/* -------------------------------------------------------------------------- */
+/* Control the assembler format that we output. */
+
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will end at
+ the end of the line. */
+#define ASM_COMMENT_START "#"
+
+/* Output to assembler file text saying following lines may contain character
+ constants, extra white space, comments, etc.
+
+ JPB 29-Aug-10: Default would seem to be OK here. */
+#define ASM_APP_ON "#APP\n"
+
+/* Output to assembler file text saying following lines no longer contain
+ unusual constructs.
+
+ JPB 29-Aug-10: Default would seem to be OK here. */
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* Switch to the text or data segment. */
+
+/* Output before read-only data. */
+#define TEXT_SECTION_ASM_OP "\t.section .text"
+
+/* Output before writable data. */
+#define DATA_SECTION_ASM_OP "\t.section .data"
+
+/* Output before uninitialized data. */
+#define BSS_SECTION_ASM_OP "\t.section .bss"
+
+/* How to refer to registers in assembler output. This sequence is indexed by
+ compiler's hard-register-number (see above). */
+#define REGISTER_NAMES \
+ {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", \
+ "argp", "frame", "cc-flag"}
+
+
+/* -------------------------------------------------------------------------- */
+/* Debug things for DBX (STABS) */
+/* */
+/* Note. Our config.gcc includes dbxelf.h, which sets up appropriate */
+/* defaults. Choice of which debug format to use is in our elf.h */
+/* -------------------------------------------------------------------------- */
+
+/* Don't try to use the type-cross-reference character in DBX data.
+ Also has the consequence of putting each struct, union or enum
+ into a separate .stabs, containing only cross-refs to the others. */
+/* JPB 24-Aug-10: Is this really correct. Can't GDB use this info? */
+#define DBX_NO_XREFS
+
+/* -------------------------------------------------------------------------- */
+/* Debug things for DWARF2 */
+/* */
+/* Note. Choice of which debug format to use is in our elf.h */
+/* -------------------------------------------------------------------------- */
+
+/* We support frame unwind info including for exceptions handling. This needs
+ INCOMING_RETURN_ADDR_RTX to be set and OBJECT_FORMAT_ELF to be defined (in
+ elfos.h). Override any default value. */
+#undef DWARF2_UNWIND_INFO
+#define DWARF2_UNWIND_INFO 1
+
+/* We want frame info produced. Note that this is superfluous if
+ DWARF2_UNWIND_INFO is non-zero, but we set so this so, we can produce frame
+ info even when it is zero. Override any default value. */
+#undef DWARF2_FRAME_INFO
+#define DWARF2_FRAME_INFO 1
+
+/* Macro specifying which register holds the return address */
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LINK_REGNUM)
+
+/* Where is the start of our stack frame in relation to the end of the
+ previous stack frame at the start of a function, before the prologue */
+#define INCOMING_FRAME_SP_OFFSET 0
+
+/* Use compact debug tables. Generates .file/.loc directives. */
+#undef DWARF2_ASM_LINE_DEBUG_INFO
+#define DWARF2_ASM_LINE_DEBUG_INFO 1
+
+/* We don't need an alternative return address for now. */
+/* DWARF_ALT_FRAME_RETURN_COLUMN */
+
+/* We always save registers in the prologue with word alignment, so don't
+ need this. */
+/* DWARF_CIE_DATA_ALIGNMENT */
+
+/* This specifies the maximum number of registers we can save in a frame. We
+ could note that only SP, FP, LR, arg regs and callee saved regs come into
+ this category. However this is only an efficiency thing, so for now we
+ don't use it. */
+/* DWARF_FRAME_REGISTERS */
+
+/* This specifies a mapping from register numbers in .dwarf_frame to
+ .eh_frame. However for us they are the same, so we don't need it. */
+/* DWARF_FRAME_REGNUM */
+
+/* Defined if the DWARF column numbers do not match register numbers. For us
+ they do, so this is not needed. */
+/* DWARF_REG_TO_UNWIND_COLUMN */
+
+/* Can be used to define a register guaranteed to be zero. Only useful if zero
+ is used to terminate backtraces, and not recommended for new ports, so we
+ don't use it. */
+/* DWARF_ZERO_REG */
+
+/* This is the inverse function for DWARF_FRAME_REGNUM. Again not needed. */
+/* DWARF2_FRAME_REG_OUT */
+
+
+/* -------------------------------------------------------------------------- */
+/* Node: Label Output */
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.global "
+
+#define SUPPORTS_WEAK 1
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ { assemble_name (FILE, NAME); fputs (":\n", FILE); }
+
+/* We use -fleading-underscore to add it, when necessary.
+ JPB: No prefix for global symbols */
+#define USER_LABEL_PREFIX ""
+
+/* Remove any previous definition (elfos.h). */
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \
+ sprintf (LABEL, "*%s%d", PREFIX, NUM)
+
+/* This is how to output an assembler line defining an int constant. */
+#define ASM_OUTPUT_INT(stream, value) \
+ { \
+ fprintf (stream, "\t.word\t"); \
+ output_addr_const (stream, (value)); \
+ fprintf (stream, "\n")}
+
+/* This is how to output an assembler line defining a float constant. */
+#define ASM_OUTPUT_FLOAT(stream, value) \
+ { long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (value,l); \
+ fprintf(stream,"\t.word\t0x%08x\t\t# float %26.7e\n", l, value); }
+
+/* This is how to output an assembler line defining a double constant. */
+#define ASM_OUTPUT_DOUBLE(stream, value) \
+ { long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (value,&l[0]); \
+ fprintf(stream,"\t.word\t0x%08x,0x%08x\t# float %26.16le\n", \
+ l[0],l[1],value); }
+
+/* This is how to output an assembler line defining a long double constant.
+
+ JPB 29-Aug-10: Do we really mean this. I thought long double on OR1K was
+ the same as double. */
+#define ASM_OUTPUT_LONG_DOUBLE(stream, value) \
+ { long l[4]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (value,&l[0]); \
+ fprintf (stream, \
+ "\t.word\t0x%08x,0x%08x,0x%08x,0x%08x\t# float %26.18lle\n", \
+ l[0],l[1],l[2],l[3],value); }
+
+/* This is how to output an assembler line defining a short constant. */
+#define ASM_OUTPUT_SHORT(stream, value) \
+ { fprintf (stream, "\t.half\t"); \
+ output_addr_const (stream, (value)); \
+ fprintf (stream, "\n"); }
+
+/* This is how to output an assembler line defining a char constant. */
+#define ASM_OUTPUT_CHAR(stream, value) \
+ { fprintf (stream, "\t.byte\t"); \
+ output_addr_const (stream, (value)); \
+ fprintf (stream, "\n")}
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(stream, value) \
+ fprintf (stream, "\t.byte\t0x%02x\n", (value))
+
+/* This is how to output an insn to push a register on the stack.
+ It need not be very fast code.
+
+ JPB 29-Aug-10: This was using l.sub (since we don't have l.subi), so it
+ was potty code. Replaced by adding immediate -1. */
+#define ASM_OUTPUT_REG_PUSH(stream, regno) \
+ { fprintf (stream, "\tl.addi\tr1,r1,-4\n"); \
+ fprintf (stream, "\tl.sw\t0(r1),%s\n", reg_names[regno]); }
+
+/* This is how to output an insn to pop a register from the stack.
+ It need not be very fast code. */
+#define ASM_OUTPUT_REG_POP(stream,REGNO) \
+ { fprintf (stream, "\tl.lwz\t%s,0(r1)\n", reg_names[REGNO]); \
+ fprintf (stream, "\tl.addi\tr1,r1,4\n"); }
+
+/* This is how to output an element of a case-vector that is absolute.
+ (The Vax does not use such vectors,
+ but we must define this macro anyway.) */
+#define ASM_OUTPUT_ADDR_VEC_ELT(stream, value) \
+ fprintf (stream, "\t.word\t.L%d\n", value)
+
+/* This is how to output an element of a case-vector that is relative. */
+#define ASM_OUTPUT_ADDR_DIFF_ELT(stream, body, value, rel) \
+ fprintf (stream, "\t.word\t.L%d-.L%d\n", value, rel)
+
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+/* ??? If we were serious about PIC, we should also use l.jal to get
+ the table start address. */
+
+/* This is how to output an assembler line that says to advance the location
+ counter to a multiple of 2**log bytes. */
+#define ASM_OUTPUT_ALIGN(stream, log) \
+ if ((log) != 0) \
+ { \
+ fprintf (stream, "\t.align\t%d\n", 1 << (log)); \
+ }
+
+/* This is how to output an assembler line that says to advance the location
+ counter by "size" bytes. */
+#define ASM_OUTPUT_SKIP(stream, size) \
+ fprintf (stream, "\t.space %d\n", (size))
+
+/* Need to split up .ascii directives to avoid breaking
+ the linker. */
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(stream, ptr, len) \
+ output_ascii_pseudo_op (stream, (const unsigned char *) (ptr), len)
+
+/* Invoked just before function output. */
+#define ASM_OUTPUT_FUNCTION_PREFIX(stream, fnname) \
+ { fputs (".proc\t", stream); assemble_name (stream, fnname); \
+ fputs ("\n", stream); }
+
+/* This says how to output an assembler line to define a global common
+ symbol. */
+#define ASM_OUTPUT_COMMON(stream,name,size,rounded) \
+ { data_section (); \
+ fputs ("\t.global\t", stream); \
+ assemble_name(stream, name); \
+ fputs ("\n", stream); \
+ assemble_name (stream, name); \
+ fputs (":\n", stream); \
+ fprintf (stream, "\t.space\t%d\n", rounded); }
+
+/* This says how to output an assembler line to define a local common
+ symbol.
+
+ JPB 29-Aug-10: I'm sure this doesn't work - we don't have a .bss directive
+ like this. */
+#define ASM_OUTPUT_LOCAL(stream, name, size, rounded) \
+ { fputs ("\t.bss\t", (stream)); \
+ assemble_name ((stream), (name)); \
+ fprintf ((stream), ",%d,%d\n", (size), (rounded)); }
+
+/* This says how to output an assembler line to define a global common symbol
+ with size "size" (in bytes) and alignment "align" (in bits). */
+#define ASM_OUTPUT_ALIGNED_COMMON(stream, name, size, align) \
+ { data_section(); \
+ if ((ALIGN) > 8) \
+ { \
+ fprintf(stream, "\t.align %d\n", ((align) / BITS_PER_UNIT)); \
+ } \
+ fputs("\t.global\t", stream); assemble_name(stream, name); \
+ fputs("\n", stream); \
+ assemble_name(stream, name); \
+ fputs (":\n", stream); \
+ fprintf(stream, "\t.space\t%d\n", size); }
+
+/* This says how to output an assembler line to define a local common symbol
+ with size "size" (in bytes) and alignment "align" (in bits). */
+#define ASM_OUTPUT_ALIGNED_LOCAL(stream, name, size, align) \
+ { data_section(); \
+ if ((align) > 8) \
+ { \
+ fprintf(stream, "\t.align %d\n", ((align) / BITS_PER_UNIT)); \
+ } \
+ assemble_name(stream, name); \
+ fputs (":\n", stream); \
+ fprintf(stream, "\t.space %d\n", size); }
+
+/* Store in "output" a string (made with alloca) containing an assembler-name
+ for a local static variable named "name". "labelno" is an integer which is
+ different for each call. */
+#define ASM_FORMAT_PRIVATE_NAME(output, name, labelno) \
+ { (output) = (char *) alloca (strlen ((name)) + 10); \
+ sprintf ((output), "%s.%lu", (name), (unsigned long int) (labelno)); }
+
+/* Macro for %code validation. Returns nonzero if valid.
+
+ The acceptance of '(' is an idea taken from SPARC; output nop for %( if not
+ optimizing or the slot is not filled. */
+#define PRINT_OPERAND_PUNCT_VALID_P(code) (('(' == code) || ('%' == code))
+
+/* Print an instruction operand "x" on file "stream". "code" is the code from
+ the %-spec that requested printing this operand; if `%z3' was used to print
+ operand 3, then CODE is 'z'. */
+#define PRINT_OPERAND(stream, x, code) \
+{ \
+ if (code == 'r' \
+ && GET_CODE (x) == MEM \
+ && GET_CODE (XEXP (x, 0)) == REG) \
+ { \
+ fprintf (stream, "%s", reg_names[REGNO (XEXP (x, 0))]); \
+ } \
+ else if (code == '(') \
+ { \
+ if (TARGET_DELAY_ON && dbr_sequence_length ()) \
+ fprintf (stream, "\t# delay slot filled"); \
+ else if (!TARGET_DELAY_OFF) \
+ fprintf (stream, "\n\tl.nop\t\t\t# nop delay slot"); \
+ } \
+ else if (code == 'C') \
+ { \
+ switch (GET_CODE (x)) \
+ { \
+ case EQ: \
+ fputs ("eq", stream); \
+ break; \
+ case NE: \
+ fputs ("ne", stream); \
+ break; \
+ case GT: \
+ fputs ("gts", stream); \
+ break; \
+ case GE: \
+ fputs ("ges", stream); \
+ break; \
+ case LT: \
+ fputs ("lts", stream); \
+ break; \
+ case LE: \
+ fputs ("les", stream); \
+ break; \
+ case GTU: \
+ fputs ("gtu", stream); \
+ break; \
+ case GEU: \
+ fputs ("geu", stream); \
+ break; \
+ case LTU: \
+ fputs ("ltu", stream); \
+ break; \
+ case LEU: \
+ fputs ("leu", stream); \
+ break; \
+ default: \
+ abort (); \
+ } \
+ } \
+ else if (code == 'H') \
+ { \
+ if (GET_CODE (x) == REG) \
+ fprintf (stream, "%s", reg_names[REGNO (x) + 1]); \
+ else \
+ abort (); \
+ } \
+ else if (GET_CODE (x) == REG) \
+ fprintf (stream, "%s", reg_names[REGNO (x)]); \
+ else if (GET_CODE (x) == MEM) \
+ output_address (XEXP (x, 0)); \
+ else \
+ output_addr_const (stream, x); \
+}
+
+/* The size of the trampoline in bytes. This is a block of code followed by
+ two words specifying the function address and static chain pointer. */
+#define TRAMPOLINE_SIZE \
+ (or1k_trampoline_code_size () + GET_MODE_SIZE (ptr_mode) * 2)
+
+/* Alignment required for trampolines, in bits.
+
+ For the OR1K, there is no need for anything other than word alignment. */
+#define TRAMPOLINE_ALIGNMENT 32
+
+/* Assume that if the assembler supports thread local storage
+ * the system supports it. */
+#if !defined(TARGET_HAVE_TLS) && defined(HAVE_AS_TLS)
+#define TARGET_HAVE_TLS true
+#endif
+
+/* Describe how we implement __builtin_eh_return. */
+#define EH_RETURN_REGNUM 23
+/* Use r25, r27, r29 and r31 (clobber regs) for exception data */
+#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (25 + ((N)<<1)) : INVALID_REGNUM)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, EH_RETURN_REGNUM)
+#define EH_RETURN_HANDLER_RTX or1k_eh_return_handler_rtx ()
+
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
+ (flag_pic ? DW_EH_PE_pcrel : DW_EH_PE_absptr)
+
+#define INIT_EXPANDERS or1k_init_expanders ()
+
+/* A C structure for machine-specific, per-function data. This is
+ * added to the cfun structure. */
+typedef struct GTY(()) machine_function
+{
+ /* Force stack save of LR. Used in RETURN_ADDR_RTX. */
+ int force_lr_save;
+} machine_function;
+
+#endif /* _OR1K_H_ */
diff -rNU3 dist.orig/gcc/config/or1k/or1k.md dist/gcc/config/or1k/or1k.md
--- dist.orig/gcc/config/or1k/or1k.md 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/or1k.md 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,1599 @@
+;; Machine description for GNU compiler, OpenRISC 1000 family, OR32 ISA
+;; Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
+;; 2009, 2010 Free Software Foundation, Inc.
+;; Copyright (C) 2010 Embecosm Limited
+
+;; Contributed by Damjan Lampret in 1999.
+;; Major optimizations by Matjaz Breskvar in 2005.
+;; Floating point additions by Jungsook Yang
+;; Julius Baxter in 2010
+;; Updated for GCC 4.5 by Jeremy Bennett
+;; and Joern Rennecke in 2010
+
+;; This file is part of GNU CC.
+
+;; This program is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by the Free
+;; Software Foundation; either version 3 of the License, or (at your option)
+;; any later version.
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+;; more details.
+;;
+;; You should have received a copy of the GNU General Public License along
+;; with this program. If not, see . */
+
+(define_constants [
+ (SP_REG 1)
+ (FP_REG 2) ; hard frame pointer
+ (CC_REG 34)
+
+ ;; unspec values
+ (UNSPEC_FRAME 0)
+ (UNSPEC_GOT 1)
+ (UNSPEC_GOTOFFHI 2)
+ (UNSPEC_GOTOFFLO 3)
+ (UNSPEC_TPOFFLO 4)
+ (UNSPEC_TPOFFHI 5)
+ (UNSPEC_GOTTPOFFLO 6)
+ (UNSPEC_GOTTPOFFHI 7)
+ (UNSPEC_GOTTPOFFLD 8)
+ (UNSPEC_TLSGDLO 9)
+ (UNSPEC_TLSGDHI 10)
+ (UNSPEC_SET_GOT 101)
+ (UNSPEC_CMPXCHG 201)
+ (UNSPEC_FETCH_AND_OP 202)
+])
+
+(include "predicates.md")
+
+(include "constraints.md")
+
+(define_attr "type"
+ "unknown,load,store,move,extend,logic,add,mul,shift,compare,branch,jump,fp,jump_restore"
+ (const_string "unknown"))
+
+;; Number of machine instructions required to implement an insn.
+(define_attr "length" "" (const_int 1))
+
+;; Single delay slot after branch or jump instructions, wich may contain any
+;; instruction but another branch or jump.
+;; If TARGET_DELAY_OFF is not true, then never use delay slots.
+;; If TARGET_DELAY_ON is not true, no instruction will be allowed to
+;; fill the slot, and so it will be filled by a nop instead.
+(define_delay
+ (and (match_test "!TARGET_DELAY_OFF") (eq_attr "type" "branch,jump"))
+ [(and (match_test "TARGET_DELAY_ON")
+ (eq_attr "type" "!branch,jump")
+ (eq_attr "length" "1")) (nil) (nil)])
+
+;; ALU is modelled as a single functional unit, which is reserved for varying
+;; numbers of slots.
+;;
+;; I think this is all incorrect for the OR1K. The latency says when the
+;; result will be ready, not how long the pipeline takes to execute.
+(define_cpu_unit "or1k_alu")
+(define_insn_reservation "bit_unit" 3 (eq_attr "type" "shift") "or1k_alu")
+(define_insn_reservation "lsu_load" 3 (eq_attr "type" "load") "or1k_alu*3")
+(define_insn_reservation "lsu_store" 2 (eq_attr "type" "store") "or1k_alu")
+(define_insn_reservation "alu_unit" 2
+ (eq_attr "type" "add,logic,extend,move,compare")
+ "or1k_alu")
+(define_insn_reservation "mul_unit" 16 (eq_attr "type" "mul") "or1k_alu*16")
+
+;; AI = Atomic Integers
+;; We do not support DI in our atomic operations.
+(define_mode_iterator AI [QI HI SI])
+
+;; Note: We use 'mult' here for nand since it does not have its own RTX class.
+(define_code_iterator atomic_op [plus minus and ior xor mult])
+(define_code_attr op_name
+ [(plus "add") (minus "sub") (and "and") (ior "or") (xor "xor") (mult "nand")])
+(define_code_attr op_insn
+ [(plus "add") (minus "sub") (and "and") (ior "or") (xor "xor") (mult "and")])
+(define_code_attr post_op_insn
+ [(plus "") (minus "") (and "") (ior "") (xor "")
+ (mult "l.xori \t%3,%3,0xffff # fetch_nand: invert")])
+
+;; Called after register allocation to add any instructions needed for the
+;; prologue. Using a prologue insn is favored compared to putting all of the
+;; instructions in output_function_prologue(), since it allows the scheduler
+;; to intermix instructions with the saves of the caller saved registers. In
+;; some cases, it might be necessary to emit a barrier instruction as the last
+;; insn to prevent such scheduling.
+
+(define_expand "prologue"
+ [(use (const_int 1))]
+ ""
+{
+ or1k_expand_prologue ();
+ DONE;
+})
+
+;; Called after register allocation to add any instructions needed for the
+;; epilogue. Using an epilogue insn is favored compared to putting all of the
+;; instructions in output_function_epilogue(), since it allows the scheduler
+;; to intermix instructions with the restores of the caller saved registers.
+;; In some cases, it might be necessary to emit a barrier instruction as the
+;; first insn to prevent such scheduling.
+(define_expand "epilogue"
+ [(use (const_int 2))]
+ ""
+{
+ or1k_expand_epilogue ();
+ DONE;
+})
+
+(define_insn "frame_alloc_fp"
+ [(set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 0 "nonmemory_operand" "r,I")))
+ (clobber (mem:QI (plus:SI (reg:SI FP_REG)
+ (unspec:SI [(const_int FP_REG)] UNSPEC_FRAME))))]
+ ""
+ "@
+ l.add\tr1,r1,%0\t# allocate frame
+ l.addi\tr1,r1,%0\t# allocate frame"
+ [(set_attr "type" "add")
+ (set_attr "length" "1")])
+
+(define_insn "frame_dealloc_fp"
+ [(set (reg:SI SP_REG) (reg:SI FP_REG))
+ (clobber (mem:QI (plus:SI (reg:SI FP_REG)
+ (unspec:SI [(const_int FP_REG)] UNSPEC_FRAME))))]
+ ""
+ "l.ori\tr1,r2,0\t# deallocate frame"
+ [(set_attr "type" "logic")
+ (set_attr "length" "1")])
+
+(define_insn "frame_dealloc_sp"
+ [(set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 0 "nonmemory_operand" "r,I")))
+ (clobber (mem:QI (plus:SI (reg:SI SP_REG)
+ (unspec:SI [(const_int SP_REG)] UNSPEC_FRAME))))]
+ ""
+ "@
+ l.add \tr1,r1,%0
+ l.addi \tr1,r1,%0"
+ [(set_attr "type" "add")
+ (set_attr "length" "1")])
+
+(define_insn "return_internal"
+ [(return)
+ (use (match_operand 0 "pmode_register_operand" ""))]
+ ""
+ "l.jr \t%0\t# return_internal%("
+ [(set_attr "type" "jump")
+ (set_attr "length" "1")])
+
+
+
+;;
+;; movQI
+;;
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+ if (can_create_pseudo_p())
+ {
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_lowpart (QImode, reg);
+ }
+ if (GET_CODE (operands[1]) == MEM && optimize > 0)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_rtx_SET (SImode, reg,
+ gen_rtx_ZERO_EXTEND (SImode,
+ operands[1])));
+
+ operands[1] = gen_lowpart (QImode, reg);
+ }
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+ }
+")
+
+(define_insn "*movqi_internal"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=m,r,r,r,r")
+ (match_operand:QI 1 "general_operand" "r,r,I,K,m"))]
+ ""
+ "@
+ l.sb \t%0,%1\t # movqi
+ l.ori \t%0,%1,0\t # movqi: move reg to reg
+ l.addi \t%0,r0,%1\t # movqi: move immediate
+ l.ori \t%0,r0,%1\t # movqi: move immediate
+ l.lbz \t%0,%1\t # movqi"
+ [(set_attr "type" "store,add,add,logic,load")])
+
+
+;;
+;; movHI
+;;
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+ if (can_create_pseudo_p())
+ {
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else if (GET_CODE (operands[1]) == MEM && optimize > 0)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_rtx_SET (SImode, reg,
+ gen_rtx_ZERO_EXTEND (SImode,
+ operands[1])));
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+ }
+")
+
+(define_insn "*movhi_internal"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=m,r,r,r,r")
+ (match_operand:HI 1 "general_operand" "r,r,I,K,m"))]
+ ""
+ "@
+ l.sh \t%0,%1\t # movhi
+ l.ori \t%0,%1,0\t # movhi: move reg to reg
+ l.addi \t%0,r0,%1\t # movhi: move immediate
+ l.ori \t%0,r0,%1\t # movhi: move immediate
+ l.lhz \t%0,%1\t # movhi"
+ [(set_attr "type" "store,add,add,logic,load")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+{
+ if (or1k_expand_move (SImode, operands)) DONE;
+})
+
+;;
+;; movSI
+;;
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,m")
+ (match_operand:SI 1 "input_operand" "I,K,M,r,m,r"))]
+ "(register_operand (operands[0], SImode)
+ || (register_operand (operands[1], SImode))
+ || (operands[1] == const0_rtx))"
+ "@
+ l.addi \t%0,r0,%1\t # move immediate I
+ l.ori \t%0,r0,%1\t # move immediate K
+ l.movhi \t%0,hi(%1)\t # move immediate M
+ l.ori \t%0,%1,0\t # move reg to reg
+ l.lwz \t%0,%1\t # SI load
+ l.sw \t%0,%1\t # SI store"
+ [(set_attr "type" "add,load,store,add,logic,move")
+ (set_attr "length" "1,1,1,1,1,1")])
+
+(define_insn "movsi_lo_sum"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ ""
+ "l.ori \t%0,%1,lo(%2) # movsi_lo_sum"
+ [(set_attr "type" "logic")
+ (set_attr "length" "1")])
+
+(define_insn "movsi_high"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (match_operand:SI 1 "immediate_operand" "i")))]
+ ""
+ "l.movhi \t%0,hi(%1) # movsi_high"
+[(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "movsi_gotofflo"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 2 "" ""))] UNSPEC_GOTOFFLO))]
+ "flag_pic"
+ "l.ori \t%0,%1,gotofflo(%2) # movsi_gotofflo"
+ [(set_attr "type" "logic")
+ (set_attr "length" "1")])
+
+(define_insn "movsi_gotoffhi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")] UNSPEC_GOTOFFHI))]
+ "flag_pic"
+ "l.movhi \t%0,gotoffhi(%1) # movsi_gotoffhi"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "movsi_got"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "symbolic_operand" "")] UNSPEC_GOT))
+ (use (reg:SI 16))]
+ "flag_pic"
+ "l.lwz \t%0, got(%1)(r16)"
+ [(set_attr "type" "load")]
+)
+
+(define_insn "movsi_tlsgdlo"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "i"))] UNSPEC_TLSGDLO))]
+ ""
+ "l.ori \t%0,%1,tlsgdlo(%2) # movsi_tlsgdlo"
+ [(set_attr "type" "logic")
+ (set_attr "length" "1")])
+
+(define_insn "movsi_tlsgdhi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "immediate_operand" "i")] UNSPEC_TLSGDHI))]
+ ""
+ "l.movhi \t%0,tlsgdhi(%1) # movsi_tlsgdhi"
+[(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "movsi_gottpofflo"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "i"))] UNSPEC_GOTTPOFFLO))]
+ ""
+ "l.ori \t%0,%1,gottpofflo(%2) # movsi_gottpofflo"
+ [(set_attr "type" "logic")
+ (set_attr "length" "1")])
+
+(define_insn "movsi_gottpoffhi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "immediate_operand" "i")] UNSPEC_GOTTPOFFHI))]
+ ""
+ "l.movhi \t%0,gottpoffhi(%1) # movsi_gottpoffhi"
+[(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "load_gottpoff"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_GOTTPOFFLD))]
+ ""
+ "l.lwz \t%0,0(%1) # load_gottpoff"
+[(set_attr "type" "load")
+ (set_attr "length" "1")])
+
+(define_insn "movsi_tpofflo"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "i"))] UNSPEC_TPOFFLO))]
+ ""
+ "l.ori \t%0,%1,tpofflo(%2) # movsi_tpofflo"
+ [(set_attr "type" "logic")
+ (set_attr "length" "1")])
+
+(define_insn "movsi_tpoffhi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "immediate_operand" "i")] UNSPEC_TPOFFHI))]
+ ""
+ "l.movhi \t%0,tpoffhi(%1) # movsi_tpoffhi"
+[(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+
+(define_insn_and_split "movsi_insn_big"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "immediate_operand" "i"))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ ;; the switch of or1k bfd to Rela allows us to schedule insns separately.
+ "l.movhi \t%0,hi(%1)\;l.ori \t%0,%0,lo(%1)"
+ "(GET_CODE (operands[1]) != CONST_INT
+ || ! (CONST_OK_FOR_CONSTRAINT_P (INTVAL (operands[1]), 'I', \"I\")
+ || CONST_OK_FOR_CONSTRAINT_P (INTVAL (operands[1]), 'K', \"K\")
+ || CONST_OK_FOR_CONSTRAINT_P (INTVAL (operands[1]), 'M', \"M\")))
+ && reload_completed
+ && GET_CODE (operands[1]) != HIGH && GET_CODE (operands[1]) != LO_SUM"
+ [(pc)]
+{
+ if (!or1k_expand_symbol_ref(SImode, operands))
+ {
+ emit_insn (gen_movsi_high (operands[0], operands[1]));
+ emit_insn (gen_movsi_lo_sum (operands[0], operands[0], operands[1]));
+ }
+ DONE;
+}
+ [(set_attr "type" "move")
+ (set_attr "length" "2")])
+
+
+;;
+;; Conditional Branches & Moves
+;;
+
+(define_expand "addsicc"
+ [(match_operand:SI 0 "register_operand" "")
+ (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "register_operand" "")]
+ ""
+ "FAIL;")
+
+(define_expand "addhicc"
+ [(match_operand:HI 0 "register_operand" "")
+ (match_operand 1 "comparison_operator" "")
+ (match_operand:HI 2 "register_operand" "")
+ (match_operand:HI 3 "register_operand" "")]
+ ""
+ "FAIL;")
+
+(define_expand "addqicc"
+ [(match_operand:QI 0 "register_operand" "")
+ (match_operand 1 "comparison_operator" "")
+ (match_operand:QI 2 "register_operand" "")
+ (match_operand:QI 3 "register_operand" "")]
+ ""
+ "FAIL;")
+
+
+;;
+;; conditional moves
+;;
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "register_operand" "")))]
+ "TARGET_MASK_CMOV"
+ "
+{
+ if (or1k_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+}")
+
+(define_expand "movhicc"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:HI 2 "register_operand" "")
+ (match_operand:HI 3 "register_operand" "")))]
+ ""
+ "
+{
+ FAIL;
+}")
+
+(define_expand "movqicc"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:QI 2 "register_operand" "")
+ (match_operand:QI 3 "register_operand" "")))]
+ ""
+ "
+{
+ FAIL;
+}")
+
+
+;; We use the BASE_REGS for the cmov input operands because, if rA is
+;; 0, the value of 0 is placed in rD upon truth. Similarly for rB
+;; because we may switch the operands and rB may end up being rA.
+
+(define_insn "cmov"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand 4 "cc_reg_operand" "")
+ (const_int 0)])
+ (match_operand:SI 2 "register_operand" "r")
+ (match_operand:SI 3 "register_operand" "r")))]
+ "TARGET_MASK_CMOV"
+ "*
+ return or1k_output_cmov(operands);
+ ")
+
+;;
+;; ....................
+;;
+;; COMPARISONS
+;;
+;; ....................
+
+;; Flow here is rather complex:
+;;
+;; 1) The cmp{si,di,sf,df} routine is called. It deposits the
+;; arguments into the branch_cmp array, and the type into
+;; branch_type. No RTL is generated.
+;;
+;; 2) The appropriate branch define_expand is called, which then
+;; creates the appropriate RTL for the comparison and branch.
+;; Different CC modes are used, based on what type of branch is
+;; done, so that we can constrain things appropriately. There
+;; are assumptions in the rest of GCC that break if we fold the
+;; operands into the branches for integer operations, and use cc0
+;; for floating point, so we use the fp status register instead.
+;; If needed, an appropriate temporary is created to hold the
+;; of the integer compare.
+
+;; Compare insns are next. Note that the RS/6000 has two types of compares,
+;; signed & unsigned, and one type of branch.
+;;
+;; Start with the DEFINE_EXPANDs to generate the rtl for compares, scc
+;; insns, and branches. We store the operands of compares until we see
+;; how it is used.
+
+;; JPB 31-Aug-10: cmpxx appears to be obsolete in GCC 4.5. Needs more
+;; investigation.
+
+;;(define_expand "cmpsi"
+;; [(set (reg:CC CC_REG)
+;; (compare:CC (match_operand:SI 0 "register_operand" "")
+;; (match_operand:SI 1 "nonmemory_operand" "")))]
+;; ""
+;; {
+;; if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+;; operands[0] = force_reg (SImode, operands[0]);
+;; or1k_compare_op0 = operands[0];
+;; or1k_compare_op1 = operands[1];
+;; DONE;
+;; })
+
+;; (define_expand "cmpsf"
+;; [(set (reg:CC CC_REG)
+;; (compare:CC (match_operand:SF 0 "register_operand" "")
+;; (match_operand:SF 1 "register_operand" "")))]
+;; "TARGET_HARD_FLOAT"
+;; {
+;; if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+;; operands[0] = force_reg (SFmode, operands[0]);
+;; or1k_compare_op0 = operands[0];
+;; or1k_compare_op1 = operands[1];
+;; DONE;
+;; })
+
+(define_expand "cbranchsi4"
+ [(match_operator 0 "comparison_operator"
+ [(match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "nonmemory_operand")])
+ (match_operand 3 "")]
+ ""
+ {
+ or1k_expand_conditional_branch (operands, SImode);
+ DONE;
+ })
+
+(define_expand "cbranchsf4"
+ [(match_operator 0 "comparison_operator"
+ [(match_operand:SF 1 "register_operand")
+ (match_operand:SF 2 "register_operand")])
+ (match_operand 3 "")]
+ "TARGET_HARD_FLOAT"
+ {
+ or1k_expand_conditional_branch (operands, SFmode);
+ DONE;
+ })
+
+;;
+;; Setting the CCxx registers from a comparison
+;;
+
+
+
+;; Here are the actual compare insns.
+(define_insn "*cmpsi_eq"
+ [(set (reg:CCEQ CC_REG)
+ (compare:CCEQ (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "nonmemory_operand" "I,r")))]
+ ""
+ "@
+ l.sfeqi\t%0,%1 # cmpsi_eq
+ l.sfeq \t%0,%1 # cmpsi_eq"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmpsi_ne"
+ [(set (reg:CCNE CC_REG)
+ (compare:CCNE (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "nonmemory_operand" "I,r")))]
+ ""
+ "@
+ l.sfnei\t%0,%1 # cmpsi_ne
+ l.sfne \t%0,%1 # cmpsi_ne"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmpsi_gt"
+ [(set (reg:CCGT CC_REG)
+ (compare:CCGT (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "nonmemory_operand" "I,r")))]
+ ""
+ "@
+ l.sfgtsi\t%0,%1 # cmpsi_gt
+ l.sfgts \t%0,%1 # cmpsi_gt"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmpsi_gtu"
+ [(set (reg:CCGTU CC_REG)
+ (compare:CCGTU (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "nonmemory_operand" "I,r")))]
+ ""
+ "@
+ l.sfgtui\t%0,%1 # cmpsi_gtu
+ l.sfgtu \t%0,%1 # cmpsi_gtu"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmpsi_lt"
+ [(set (reg:CCLT CC_REG)
+ (compare:CCLT (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "nonmemory_operand" "I,r")))]
+ ""
+ "@
+ l.sfltsi\t%0,%1 # cmpsi_lt
+ l.sflts \t%0,%1 # cmpsi_lt"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmpsi_ltu"
+ [(set (reg:CCLTU CC_REG)
+ (compare:CCLTU (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "nonmemory_operand" "I,r")))]
+ ""
+ "@
+ l.sfltui\t%0,%1 # cmpsi_ltu
+ l.sfltu \t%0,%1 # cmpsi_ltu"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmpsi_ge"
+ [(set (reg:CCGE CC_REG)
+ (compare:CCGE (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "nonmemory_operand" "I,r")))]
+ ""
+ "@
+ l.sfgesi\t%0,%1 # cmpsi_ge
+ l.sfges \t%0,%1 # cmpsi_ge"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+
+(define_insn "*cmpsi_geu"
+ [(set (reg:CCGEU CC_REG)
+ (compare:CCGEU (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "nonmemory_operand" "I,r")))]
+ ""
+ "@
+ l.sfgeui\t%0,%1 # cmpsi_geu
+ l.sfgeu \t%0,%1 # cmpsi_geu"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+
+(define_insn "*cmpsi_le"
+ [(set (reg:CCLE CC_REG)
+ (compare:CCLE (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "nonmemory_operand" "I,r")))]
+ ""
+ "@
+ l.sflesi\t%0,%1 # cmpsi_le
+ l.sfles \t%0,%1 # cmpsi_le"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmpsi_leu"
+ [(set (reg:CCLEU CC_REG)
+ (compare:CCLEU (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "nonmemory_operand" "I,r")))]
+ ""
+ "@
+ l.sfleui\t%0,%1 # cmpsi_leu
+ l.sfleu \t%0,%1 # cmpsi_leu"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+;; Single precision floating point evaluation instructions
+(define_insn "*cmpsf_eq"
+ [(set (reg:CCEQ CC_REG)
+ (compare:CCEQ (match_operand:SF 0 "register_operand" "r,r")
+ (match_operand:SF 1 "register_operand" "r,r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.sfeq.s\t%0,%1 # cmpsf_eq"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmpsf_ne"
+ [(set (reg:CCNE CC_REG)
+ (compare:CCNE (match_operand:SF 0 "register_operand" "r,r")
+ (match_operand:SF 1 "register_operand" "r,r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.sfne.s\t%0,%1 # cmpsf_ne"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+
+(define_insn "*cmpsf_gt"
+ [(set (reg:CCGT CC_REG)
+ (compare:CCGT (match_operand:SF 0 "register_operand" "r,r")
+ (match_operand:SF 1 "register_operand" "r,r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.sfgt.s\t%0,%1 # cmpsf_gt"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmpsf_ge"
+ [(set (reg:CCGE CC_REG)
+ (compare:CCGE (match_operand:SF 0 "register_operand" "r,r")
+ (match_operand:SF 1 "register_operand" "r,r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.sfge.s\t%0,%1 # cmpsf_ge"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+
+(define_insn "*cmpsf_lt"
+ [(set (reg:CCLT CC_REG)
+ (compare:CCLT (match_operand:SF 0 "register_operand" "r,r")
+ (match_operand:SF 1 "register_operand" "r,r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.sflt.s\t%0,%1 # cmpsf_lt"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*cmpsf_le"
+ [(set (reg:CCLE CC_REG)
+ (compare:CCLE (match_operand:SF 0 "register_operand" "r,r")
+ (match_operand:SF 1 "register_operand" "r,r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.sfle.s\t%0,%1 # cmpsf_le"
+ [(set_attr "type" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*bf"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2
+ "cc_reg_operand" "")
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ return or1k_output_bf(operands);
+ "
+ [(set_attr "type" "branch")
+ (set_attr "length" "1")])
+
+;;
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+(define_insn_and_split "movdi"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r, r, m, r")
+ (match_operand:DI 1 "general_operand" " r, m, r, n"))]
+ ""
+ "*
+ return or1k_output_move_double (operands);
+ "
+ "&& reload_completed && CONSTANT_P (operands[1])"
+ [(set (match_dup 2) (match_dup 3)) (set (match_dup 4) (match_dup 5))]
+ "operands[2] = operand_subword (operands[0], 0, 0, DImode);
+ operands[3] = operand_subword (operands[1], 0, 0, DImode);
+ operands[4] = operand_subword (operands[0], 1, 0, DImode);
+ operands[5] = operand_subword (operands[1], 1, 0, DImode);"
+ [(set_attr "length" "2,2,2,3")])
+
+;; Moving double and single precision floating point values
+
+
+(define_insn "movdf"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r, r, m, r")
+ (match_operand:DF 1 "general_operand" " r, m, r, i"))]
+ ""
+ "*
+ return or1k_output_move_double (operands);
+ "
+ [(set_attr "length" "2,2,2,3")])
+
+
+(define_insn "movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:SF 1 "general_operand" "r,m,r"))]
+ ""
+ "@
+ l.ori \t%0,%1,0\t # movsf
+ l.lwz \t%0,%1\t # movsf
+ l.sw \t%0,%1\t # movsf"
+ [(set_attr "type" "move,load,store")
+ (set_attr "length" "1,1,1")])
+
+
+;;
+;; extendqisi2
+;;
+
+(define_expand "extendqisi2"
+ [(use (match_operand:SI 0 "register_operand" ""))
+ (use (match_operand:QI 1 "nonimmediate_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_MASK_SEXT)
+ emit_insn (gen_extendqisi2_sext(operands[0], operands[1]));
+ else {
+ if ( GET_CODE(operands[1]) == MEM ) {
+ emit_insn (gen_extendqisi2_no_sext_mem(operands[0], operands[1]));
+ }
+ else {
+ emit_insn (gen_extendqisi2_no_sext_reg(operands[0], operands[1]));
+ }
+ }
+ DONE;
+}")
+
+(define_insn "extendqisi2_sext"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_MASK_SEXT"
+ "@
+ l.extbs \t%0,%1\t # extendqisi2_has_signed_extend
+ l.lbs \t%0,%1\t # extendqisi2_has_signed_extend"
+ [(set_attr "length" "1,1")
+ (set_attr "type" "extend,load")])
+
+(define_insn "extendqisi2_no_sext_mem"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ "!TARGET_MASK_SEXT"
+ "l.lbs \t%0,%1\t # extendqisi2_no_sext_mem"
+ [(set_attr "length" "1")
+ (set_attr "type" "load")])
+
+(define_expand "extendqisi2_no_sext_reg"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "register_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ "!TARGET_MASK_SEXT"
+ "
+{
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+;;
+;; extendhisi2
+;;
+
+(define_expand "extendhisi2"
+ [(use (match_operand:SI 0 "register_operand" ""))
+ (use (match_operand:HI 1 "nonimmediate_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_MASK_SEXT)
+ emit_insn (gen_extendhisi2_sext(operands[0], operands[1]));
+ else {
+ if ( GET_CODE(operands[1]) == MEM ) {
+ emit_insn (gen_extendhisi2_no_sext_mem(operands[0], operands[1]));
+ }
+ else {
+ emit_insn (gen_extendhisi2_no_sext_reg(operands[0], operands[1]));
+ }
+ }
+ DONE;
+}")
+
+(define_insn "extendhisi2_sext"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_MASK_SEXT"
+ "@
+ l.exths \t%0,%1\t # extendhisi2_has_signed_extend
+ l.lhs \t%0,%1\t # extendhisi2_has_signed_extend"
+ [(set_attr "length" "1,1")
+ (set_attr "type" "extend,load")])
+
+(define_insn "extendhisi2_no_sext_mem"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "!TARGET_MASK_SEXT"
+ "l.lhs \t%0,%1\t # extendhisi2_no_sext_mem"
+ [(set_attr "length" "1")
+ (set_attr "type" "load")])
+
+(define_expand "extendhisi2_no_sext_reg"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:HI 1 "register_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 16)))]
+ "!TARGET_MASK_SEXT"
+ "
+{
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+
+;;
+;; zero_extend2
+;;
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ l.andi \t%0,%1,0xff\t # zero_extendqisi2
+ l.lbz \t%0,%1\t # zero_extendqisi2"
+ [(set_attr "type" "logic,load")
+ (set_attr "length" "1,1")])
+
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ l.andi \t%0,%1,0xffff\t # zero_extendqisi2
+ l.lhz \t%0,%1\t # zero_extendqisi2"
+ [(set_attr "type" "logic,load")
+ (set_attr "length" "1,1")])
+
+;;
+;; Shift/rotate operations
+;;
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,L")))]
+ ""
+ "@
+ l.sll \t%0,%1,%2 # ashlsi3
+ l.slli \t%0,%1,%2 # ashlsi3"
+ [(set_attr "type" "shift,shift")
+ (set_attr "length" "1,1")])
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,L")))]
+ ""
+ "@
+ l.sra \t%0,%1,%2 # ashrsi3
+ l.srai \t%0,%1,%2 # ashrsi3"
+ [(set_attr "type" "shift,shift")
+ (set_attr "length" "1,1")])
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,L")))]
+ ""
+ "@
+ l.srl \t%0,%1,%2 # lshrsi3
+ l.srli \t%0,%1,%2 # lshrsi3"
+ [(set_attr "type" "shift,shift")
+ (set_attr "length" "1,1")])
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,L")))]
+ "TARGET_MASK_ROR"
+ "@
+ l.ror \t%0,%1,%2 # rotrsi3
+ l.rori \t%0,%1,%2 # rotrsi3"
+ [(set_attr "type" "shift,shift")
+ (set_attr "length" "1,1")])
+
+;;
+;; Logical bitwise operations
+;;
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (and:SI (match_operand:SI 1 "register_operand" "%r,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,K")))]
+ ""
+ "@
+ l.and \t%0,%1,%2 # andsi3
+ l.andi \t%0,%1,%2 # andsi3"
+ [(set_attr "type" "logic,logic")
+ (set_attr "length" "1,1")])
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%r,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,K")))]
+ ""
+ "@
+ l.or \t%0,%1,%2 # iorsi3
+ l.ori \t%0,%1,%2 # iorsi3"
+ [(set_attr "type" "logic,logic")
+ (set_attr "length" "1,1")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%r,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,I")))]
+ ""
+ "@
+ l.xor \t%0,%1,%2 # xorsi3
+ l.xori \t%0,%1,%2 # xorsi3"
+ [(set_attr "type" "logic,logic")
+ (set_attr "length" "1,1")])
+
+(define_insn "one_cmplqi2"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (not:QI (match_operand:QI 1 "register_operand" "r")))]
+ ""
+ "l.xori \t%0,%1,0x00ff # one_cmplqi2"
+ [(set_attr "type" "logic")
+ (set_attr "length" "1")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "l.xori \t%0,%1,0xffff # one_cmplsi2"
+ [(set_attr "type" "logic")
+ (set_attr "length" "1")])
+
+;;
+;; Arithmetic operations
+;;
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "l.sub \t%0,r0,%1 # negsi2"
+ [(set_attr "type" "add")
+ (set_attr "length" "1")])
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%r,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,I")))]
+ ""
+ "@
+ l.add \t%0,%1,%2 # addsi3
+ l.addi \t%0,%1,%2 # addsi3"
+ [(set_attr "type" "add,add")
+ (set_attr "length" "1,1")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,I")))]
+ ""
+ "@
+ l.sub \t%0,%1,%2 # subsi3
+ l.addi \t%0,%1,%n2 # subsi3"
+ [(set_attr "type" "add,add")]
+)
+
+;;
+;; mul and div
+;;
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_HARD_MUL"
+ "l.mul \t%0,%1,%2 # mulsi3"
+ [(set_attr "type" "mul")
+ (set_attr "length" "1")])
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_HARD_DIV"
+ "l.div \t%0,%1,%2 # divsi3"
+ [(set_attr "type" "mul")
+ (set_attr "length" "1")])
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_HARD_DIV"
+ "l.divu \t%0,%1,%2 # udivsi3"
+ [(set_attr "type" "mul")
+ (set_attr "length" "1")])
+
+;;
+;; jumps
+;;
+
+;; jump
+
+(define_expand "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "
+{
+ emit_jump_insn (gen_jump_internal (operands[0]));
+ DONE;
+}")
+
+(define_insn "jump_internal"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "l.j \t%l0 # jump_internal%("
+ [(set_attr "type" "jump")
+ (set_attr "length" "1")])
+
+;; indirect jump
+
+(define_expand "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "
+{
+ emit_jump_insn (gen_indirect_jump_internal (operands[0]));
+ DONE;
+
+}")
+
+(define_insn "indirect_jump_internal"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "l.jr \t%0 # indirect_jump_internal%("
+ [(set_attr "type" "jump")
+ (set_attr "length" "1")])
+
+;;
+;; calls
+;;
+
+;; call
+
+(define_expand "call"
+ [(parallel [(call (match_operand:SI 0 "sym_ref_mem_operand" "")
+ (match_operand 1 "" "i"))
+ (clobber (reg:SI 9))
+ (use (reg:SI 16))])]
+ ""
+ "
+{
+ emit_call_insn (gen_call_internal (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_insn "call_internal"
+[(parallel [(call (match_operand:SI 0 "sym_ref_mem_operand" "")
+ (match_operand 1 "" "i"))
+ (clobber (reg:SI 9))
+ (use (reg:SI 16))])]
+ ""
+ {
+ if (flag_pic)
+ {
+ crtl->uses_pic_offset_table = 1;
+ return "l.jal \tplt(%S0)# call_internal%(";
+ }
+
+ return "l.jal \t%S0# call_internal%(";
+ }
+ [(set_attr "type" "jump")
+ (set_attr "length" "1")])
+
+;; call value
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "register_operand" "=r")
+ (call (match_operand:SI 1 "sym_ref_mem_operand" "")
+ (match_operand 2 "" "i")))
+ (clobber (reg:SI 9))
+ (use (reg:SI 16))])]
+ ""
+ "
+{
+ emit_call_insn (gen_call_value_internal (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "call_value_internal"
+[(parallel [(set (match_operand 0 "register_operand" "=r")
+ (call (match_operand:SI 1 "sym_ref_mem_operand" "")
+ (match_operand 2 "" "i")))
+ (clobber (reg:SI 9))
+ (use (reg:SI 16))])]
+ ""
+ {
+ if (flag_pic)
+ {
+ crtl->uses_pic_offset_table = 1;
+ return "l.jal \tplt(%S1) # call_value_internal%(";
+ }
+ return "l.jal \t%S1 # call_value_internal%(";
+ }
+ [(set_attr "type" "jump")
+ (set_attr "length" "1")])
+
+;; indirect call value
+
+(define_expand "call_value_indirect"
+ [(parallel [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand 2 "" "i")))
+ (clobber (reg:SI 9))
+ (use (reg:SI 16))])]
+ ""
+ "
+{
+ emit_call_insn (gen_call_value_indirect_internal (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "call_value_indirect_internal"
+ [(parallel [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand 2 "" "i")))
+ (clobber (reg:SI 9))
+ (use (reg:SI 16))])]
+ ""
+ "l.jalr \t%1 # call_value_indirect_internal%("
+ [(set_attr "type" "jump")
+ (set_attr "length" "1")])
+
+;; indirect call
+
+(define_expand "call_indirect"
+ [(parallel [(call (mem:SI (match_operand:SI 0 "register_operand" "r"))
+ (match_operand 1 "" "i"))
+ (clobber (reg:SI 9))
+ (use (reg:SI 16))])]
+ ""
+ "
+{
+ emit_call_insn (gen_call_indirect_internal (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_insn "call_indirect_internal"
+[(parallel [(call (mem:SI (match_operand:SI 0 "register_operand" "r"))
+ (match_operand 1 "" "i"))
+ (clobber (reg:SI 9))
+ (use (reg:SI 16))])]
+ ""
+ "l.jalr \t%0 # call_indirect_internal%("
+ [(set_attr "type" "jump")
+ (set_attr "length" "1")])
+
+;; table jump
+
+(define_expand "tablejump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "
+{
+ if (CASE_VECTOR_PC_RELATIVE || flag_pic)
+ operands[0]
+ = force_reg (Pmode,
+ gen_rtx_PLUS (Pmode, operands[0],
+ gen_rtx_LABEL_REF (Pmode, operands[1])));
+ emit_jump_insn (gen_tablejump_internal (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_insn "tablejump_internal"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "l.jr \t%0 # tablejump_internal%("
+ [(set_attr "type" "jump")
+ (set_attr "length" "1")])
+
+
+;; no-op
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "l.nop"
+ [(set_attr "type" "logic")
+ (set_attr "length" "1")])
+
+;;
+;; floating point
+;;
+
+;; floating point arithmetic
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (plus:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.add.s\t%0,%1,%2 # addsf3"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (plus:DF (match_operand:DF 1 "register_operand" "r")
+ (match_operand:DF 2 "register_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT"
+ "lf.add.d\t%0,%1,%2 # adddf3"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (minus:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.sub.s\t%0,%1,%2 # subsf3"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (minus:DF (match_operand:DF 1 "register_operand" "r")
+ (match_operand:DF 2 "register_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT"
+ "lf.sub.d\t%0,%1,%2 # subdf3"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (mult:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.mul.s\t%0,%1,%2 # mulsf3"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (mult:DF (match_operand:DF 1 "register_operand" "r")
+ (match_operand:DF 2 "register_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT"
+ "lf.mul.d\t%0,%1,%2 # muldf3"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (div:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.div.s\t%0,%1,%2 # divsf3"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (div:DF (match_operand:DF 1 "register_operand" "r")
+ (match_operand:DF 2 "register_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT"
+ "lf.div.d\t%0,%1,%2 # divdf3"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+;; Conversion between fixed point and floating point.
+
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (float:SF (match_operand:SI 1 "register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.itof.s\t%0, %1 # floatsisf2"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+;; not working
+(define_insn "fixunssfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (fix:SI (match_operand:SF 1 "register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "lf.ftoi.s\t%0, %1 # fixunssfsi2"
+ [(set_attr "type" "fp")
+ (set_attr "length" "1")])
+
+;; The insn to set GOT.
+;; TODO: support for no-delay target
+(define_insn "set_got"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const_int 0)] UNSPEC_SET_GOT))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 16))]
+ ""
+ "l.jal \t8
+ \tl.movhi \tr16,gotpchi(_GLOBAL_OFFSET_TABLE_-4)
+ \tl.ori \tr16,r16,gotpclo(_GLOBAL_OFFSET_TABLE_+0)
+ \tl.add \tr16,r16,r9"
+ [(set_attr "length" "16")])
+
+(define_expand "atomic_compare_and_swap"
+ [(match_operand:SI 0 "register_operand") ;; bool output
+ (match_operand:AI 1 "register_operand") ;; val output
+ (match_operand:AI 2 "memory_operand") ;; memory
+ (match_operand:AI 3 "register_operand") ;; expected
+ (match_operand:AI 4 "register_operand") ;; desired
+ (match_operand:SI 5 "const_int_operand") ;; is_weak
+ (match_operand:SI 6 "const_int_operand") ;; mod_s
+ (match_operand:SI 7 "const_int_operand")] ;; mod_f
+ "0"
+{
+ if (mode == SImode)
+ emit_insn (gen_cmpxchg (operands[0], operands[1], operands[2], operands[3],
+ operands[4]));
+ else
+ or1k_expand_cmpxchg_qihi (operands[0], operands[1], operands[2],
+ operands[3], operands[4], INTVAL (operands[5]),
+ (enum memmodel) INTVAL (operands[6]),
+ (enum memmodel) INTVAL (operands[7]));
+ DONE;
+})
+
+(define_insn "cmpxchg"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI [(match_operand:SI 2 "memory_operand" "+m")]
+ UNSPEC_CMPXCHG))
+ (set (match_dup 2)
+ (unspec_volatile:SI [(match_operand:SI 3 "register_operand" "r")]
+ UNSPEC_CMPXCHG))
+ (set (match_operand:SI 1 "register_operand" "=&r")
+ (unspec_volatile:SI [(match_dup 2) (match_dup 3)
+ (match_operand:SI 4 "register_operand" "r")]
+ UNSPEC_CMPXCHG))]
+ ""
+ "
+ l.lwa \t%1,%2 # cmpxchg: load
+ l.sfeq \t%1,%3 # cmpxchg: cmp
+ l.bnf \t1f # cmpxchg: not expected
+ l.ori \t%0,r0,0 # cmpxchg: result = 0
+ l.swa \t%2,%4 # cmpxchg: store new
+ l.bnf \t1f # cmpxchg: done
+ l.nop
+ l.ori \t%0,r0,1 # cmpxchg: result = 1
+1:")
+
+(define_insn "cmpxchg_mask"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI [(match_operand:SI 2 "memory_operand" "+m")]
+ UNSPEC_CMPXCHG))
+ (set (match_dup 2)
+ (unspec_volatile:SI [(match_operand:SI 3 "register_operand" "r")]
+ UNSPEC_CMPXCHG))
+ (set (match_operand:SI 1 "register_operand" "=&r")
+ (unspec_volatile:SI [(match_dup 2) (match_dup 3)
+ (match_operand:SI 4 "register_operand" "r")
+ (match_operand:SI 5 "register_operand" "r")]
+ UNSPEC_CMPXCHG))
+ (clobber (match_scratch:SI 6 "=&r"))]
+ ""
+ "
+ l.lwa \t%6,%2 # cmpxchg: load
+ l.and \t%1,%6,%5 # cmpxchg: mask
+ l.sfeq \t%1,%3 # cmpxchg: cmp
+ l.bnf \t1f # cmpxchg: not expected
+ l.ori \t%0,r0,0 # cmpxchg: result = 0
+ l.xor \t%6,%6,%1 # cmpxchg: clear
+ l.or \t%6,%6,%4 # cmpxchg: set
+ l.swa \t%2,%6 # cmpxchg: store new
+ l.bnf \t1f # cmpxchg: done
+ l.nop
+ l.ori \t%0,r0,1 # cmpxchg: result = 1
+1:
+ ")
+
+(define_expand "atomic_fetch_"
+ [(match_operand:AI 0 "register_operand")
+ (match_operand:AI 1 "memory_operand")
+ (match_operand:AI 2 "register_operand")
+ (match_operand:SI 3 "const_int_operand")
+ (atomic_op:AI (match_dup 0) (match_dup 1))]
+ ""
+{
+ rtx ret = gen_reg_rtx (mode);
+ if (mode != SImode)
+ or1k_expand_fetch_op_qihi (operands[0], operands[1], operands[2], ret,
+ gen_fetch_and__mask);
+ else
+ emit_insn (gen_fetch_and_ (operands[0], operands[1], operands[2],
+ ret));
+ DONE;
+})
+
+(define_expand "atomic__fetch"
+ [(match_operand:AI 0 "register_operand")
+ (match_operand:AI 1 "memory_operand")
+ (match_operand:AI 2 "register_operand")
+ (match_operand:SI 3 "const_int_operand")
+ (atomic_op:AI (match_dup 0) (match_dup 1))]
+ ""
+{
+ rtx ret = gen_reg_rtx (mode);
+ if (mode != SImode)
+ or1k_expand_fetch_op_qihi (ret, operands[1], operands[2], operands[0],
+ gen_fetch_and__mask);
+ else
+ emit_insn (gen_fetch_and_ (ret, operands[1], operands[2],
+ operands[0]));
+ DONE;
+})
+
+(define_insn "fetch_and_"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (match_operand:SI 1 "memory_operand" "+m"))
+ (set (match_operand:SI 3 "register_operand" "=&r")
+ (unspec_volatile:SI [(match_dup 1)
+ (match_operand:SI 2 "register_operand" "r")]
+ UNSPEC_FETCH_AND_OP))
+ (set (match_dup 1)
+ (match_dup 3))
+ (atomic_op:SI (match_dup 0) (match_dup 1))]
+ ""
+ "
+1:
+ l.lwa \t%0,%1 # fetch_: load
+ l.\t\t%3,%0,%2 # fetch_: logic
+
+ l.swa \t%1,%3 # fetch_: store new
+ l.bnf \t1b # fetch_: done
+ l.nop
+ ")
+
+(define_insn "fetch_and__mask"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (match_operand:SI 1 "memory_operand" "+m"))
+ (set (match_operand:SI 3 "register_operand" "=&r")
+ (unspec_volatile:SI [(match_dup 1)
+ (match_operand:SI 2 "register_operand" "r")
+ (match_operand:SI 4 "register_operand" "r")]
+ UNSPEC_FETCH_AND_OP))
+ (set (match_dup 1)
+ (unspec_volatile:SI [(match_dup 3) (match_dup 4)] UNSPEC_FETCH_AND_OP))
+ (clobber (match_scratch:SI 5 "=&r"))
+ (atomic_op:SI (match_dup 0) (match_dup 1))]
+ ""
+ "
+1:
+ l.lwa \t%0,%1 # fetch_: load
+ l.and \t%5,%0,%4 # fetch_: mask
+ l.xor \t%5,%0,%5 # fetch_: clear
+ l.\t\t%3,%0,%2 # fetch_: logic
+
+ l.and \t%3,%3,%4 # fetch_: mask result
+ l.or \t%3,%5,%3 # fetch_: set
+ l.swa \t%1,%3 # fetch_: store new
+ l.bnf \t1b # fetch_: done
+ l.nop
+ ")
+
+;; Local variables:
+;; mode:emacs-lisp
+;; comment-start: ";; "
+;; eval: (set-syntax-table (copy-sequence (syntax-table)))
+;; eval: (modify-syntax-entry ?[ "(]")
+;; eval: (modify-syntax-entry ?] ")[")
+;; eval: (modify-syntax-entry ?{ "(}")
+;; eval: (modify-syntax-entry ?} "){")
+;; eval: (setq indent-tabs-mode t)
+;; End:
diff -rNU3 dist.orig/gcc/config/or1k/or1k.opt dist/gcc/config/or1k/or1k.opt
--- dist.orig/gcc/config/or1k/or1k.opt 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/or1k.opt 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,96 @@
+; Options for the OR1K port of the compiler
+; This file is part of GCC.
+;
+; Copyright (C) 2010 Embecosm Limited
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+HeaderInclude
+config/or1k/or1k-opts.h
+
+Variable
+enum or1k_delay or1k_delay_selected = OR1K_DELAY_DEFAULT
+
+mdelay
+Target RejectNegative Negative(mno-delay) Var(or1k_delay_selected, OR1K_DELAY_ON)
+Assume branches and jumps have a delay slot
+
+mno-delay
+Target RejectNegative Negative(mcompat-delay) Var(or1k_delay_selected, OR1K_DELAY_OFF)
+Assume branches and jumps do not have a delay slot
+
+mcompat-delay
+Target RejectNegative Negative(mdelay) Var(or1k_delay_selected, OR1K_DELAY_COMPAT)
+Assume branches and jumps have a delay slot, but fill them with nops
+
+mhard-float
+Target RejectNegative Mask(HARD_FLOAT)
+Use hardware floating point
+
+msoft-float
+Target RejectNegative InverseMask(HARD_FLOAT)
+Do not use hardware floating point
+
+mdouble-float
+Target Report RejectNegative Mask(DOUBLE_FLOAT)
+Allow hardware floating-point instructions to cover both 32-bit and 64-bit operations
+
+mhard-div
+Target RejectNegative Mask(HARD_DIV)
+Use hardware division
+
+msoft-div
+Target RejectNegative InverseMask(HARD_DIV)
+Do not use hardware division
+
+mhard-mul
+Target RejectNegative Mask(HARD_MUL)
+Use hardware multiplication
+
+msoft-mul
+Target RejectNegative InverseMask(HARD_MUL)
+Do not use hardware multiplication
+
+msext
+Target Mask(MASK_SEXT)
+Use sign-extending instructions
+
+mcmov
+Target Mask(MASK_CMOV)
+Use conditional move instructions
+
+mror
+Target Mask(MASK_ROR)
+Emit ROR instructions
+
+mboard=
+Target RejectNegative Joined
+Link with libgloss configuration suitable for this board
+
+mnewlib
+Target Report RejectNegative
+Compile for the Linux/Gnu/newlib based toolchain
+
+;; provide struct padding as in previous releases.
+;; Note that this will only affect STRUCTURE_SIZE_BOUNDARY, in particular
+;; make 2 byte structs 4-byte aligned and sized.
+;; We still use ROUND_TYPE_ALIGN to increase alignment of larger structs.
+mpadstruct
+Target Report RejectNegative Mask(PADSTRUCT)
+Make structs a multiple of 4 bytes (warning: ABI altered)
+
+mredzone=
+Target RejectNegative Joined UInteger Var(or1k_redzone) Init(128)
+Set the size of the stack below sp that is assumed to be safe from interrupts.
diff -rNU3 dist.orig/gcc/config/or1k/predicates.md dist/gcc/config/or1k/predicates.md
--- dist.orig/gcc/config/or1k/predicates.md 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/predicates.md 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,121 @@
+;; Predicate definitions for OR32
+;;
+;; Copyright (C) 2010 Embecosm Limited
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_predicate "cc_reg_operand"
+ (match_code "subreg,reg")
+{
+ register_operand (op, mode);
+
+ if (GET_CODE (op) == REG && REGNO (op) == CC_REG)
+ return 1;
+
+ return 0;
+})
+
+(define_predicate "input_operand"
+ (match_code "subreg,reg,const_int,mem,const")
+{
+ /* If both modes are non-void they must be the same. */
+ if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
+ return 0;
+
+ /* Allow any one instruction integer constant, and all CONST_INT
+ variants when we are working in DImode and !arch64. */
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && ((GET_CODE (op) == CONST_INT)
+ && (satisfies_constraint_K (op)
+ || satisfies_constraint_M (op)
+ || satisfies_constraint_I (op))))
+ return 1;
+
+ if (register_operand (op, mode))
+ return 1;
+
+ /* If this is a SUBREG, look inside so that we handle
+ paradoxical ones. */
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+
+ /* Check for valid MEM forms. */
+ if (GET_CODE (op) == MEM)
+ return memory_address_p (mode, XEXP (op, 0));
+
+ return 0;
+})
+
+(define_predicate "sym_ref_mem_operand"
+ (match_code "mem")
+{
+ if (GET_CODE (op) == MEM)
+ {
+ rtx t1 = XEXP (op, 0);
+ if (GET_CODE (t1) == SYMBOL_REF)
+ return 1;
+ }
+ return 0;
+})
+
+;; True iff OP is a symbolic operand.
+
+(define_predicate "symbolic_operand"
+ (match_code "symbol_ref,label_ref,const")
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF:
+ return !SYMBOL_REF_TLS_MODEL (op);
+ case LABEL_REF:
+ return true;
+ case CONST:
+ op = XEXP (op, 0);
+ return (GET_CODE (op) == PLUS
+ && ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
+ || GET_CODE (XEXP (op, 0)) == LABEL_REF)
+ && GET_CODE (XEXP (op, 1)) == CONST_INT);
+ default:
+ break;
+ }
+ return false;
+})
+
+;; Return true if OP is a symbolic operand for the TLS Global Dynamic model.
+(define_predicate "tgd_symbolic_operand"
+ (and (match_code "symbol_ref")
+ (match_test "SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_GLOBAL_DYNAMIC")))
+
+;; Return true if OP is a symbolic operand for the TLS Local Dynamic model.
+
+(define_predicate "tld_symbolic_operand"
+ (and (match_code "symbol_ref")
+ (match_test "SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_LOCAL_DYNAMIC")))
+
+;; Return true if OP is a symbolic operand for the TLS Initial Exec model.
+
+(define_predicate "tie_symbolic_operand"
+ (and (match_code "symbol_ref")
+ (match_test "SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_INITIAL_EXEC")))
+
+;; Return true if OP is a symbolic operand for the TLS Local Exec model.
+
+(define_predicate "tle_symbolic_operand"
+ (and (match_code "symbol_ref")
+ (match_test "SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_LOCAL_EXEC")))
diff -rNU3 dist.orig/gcc/config/or1k/t-linux dist/gcc/config/or1k/t-linux
--- dist.orig/gcc/config/or1k/t-linux 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/t-linux 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,12 @@
+MULTILIB_DIRNAMES =
+EXTRA_MULTILIB_PARTS = crti.o crtbegin.o crtend.o crtn.o
+
+# hack:
+# the non-shared uclibc-0.9.31/libc/misc/internals/__uClibc_main.c
+# already defines __dso_handle. To avoid a duplicate definition,
+# we rename the crtbegin one.
+# JPB 18-Nov-10: Commented out, since uClibc no longer defines.
+# amend gcc Makefile CFLAGS variable
+# $(T)crtbegin.o: CRTSTUFF_CFLAGS += '-D__dso_handle=__dso_handle_dummy'
+# amend libgcc Makefile CFLAGS variable
+# crtbegin$(objext): CRTSTUFF_T_CFLAGS += '-D__dso_handle=__dso_handle_dummy'
diff -rNU3 dist.orig/gcc/config/or1k/t-or1k dist/gcc/config/or1k/t-or1k
--- dist.orig/gcc/config/or1k/t-or1k 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/t-or1k 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,28 @@
+# t-or1k is a Makefile fragment to be included when
+# building gcc for the or1k target
+
+# Copyright (C) 2010 Embecosm Limited
+
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# we don't support -g so don't use it
+LIBGCC2_DEBUG_CFLAGS =
+
+# Build the libraries for both hard and soft floating point
+MULTILIB_OPTIONS = mno-delay/mcompat-delay msoft-float
+MULTILIB_DIRNAMES = no-delay compat-delay soft-float
+MULTILIB_MATCHES =
diff -rNU3 dist.orig/gcc/config/or1k/t-or1knd dist/gcc/config/or1k/t-or1knd
--- dist.orig/gcc/config/or1k/t-or1knd 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/or1k/t-or1knd 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,28 @@
+# t-or1knd is a Makefile fragment to be included when
+# building gcc for the or1k target
+
+# Copyright (C) 2010 Embecosm Limited
+
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# we don't support -g so don't use it
+LIBGCC2_DEBUG_CFLAGS =
+
+# Build the libraries for both hard and soft floating point
+MULTILIB_OPTIONS = mdelay/mcompat-delay msoft-float
+MULTILIB_DIRNAMES = delay compat-delay soft-float
+MULTILIB_MATCHES =
diff -rNU3 dist.orig/gcc/config/pa/pa-netbsd.h dist/gcc/config/pa/pa-netbsd.h
--- dist.orig/gcc/config/pa/pa-netbsd.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/pa/pa-netbsd.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,148 @@
+/* Definitions for PA_RISC with ELF format
+ Copyright (C) 1999-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ NETBSD_OS_CPP_BUILTINS_ELF(); \
+ builtin_assert ("machine=bigendian"); \
+ } \
+ while (0)
+
+#undef CPP_SPEC
+#define CPP_SPEC NETBSD_CPP_SPEC
+
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "%{v:-V} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*}"
+
+#undef EXTRA_SPECS
+#define EXTRA_SPECS \
+ { "netbsd_entry_point", NETBSD_ENTRY_POINT },
+
+#define NETBSD_ENTRY_POINT "__start"
+
+#undef LINK_SPEC
+#define LINK_SPEC NETBSD_LINK_SPEC_ELF
+
+/* NetBSD profiling functions don't need gcc to allocate counters. */
+#define NO_DEFERRED_PROFILE_COUNTERS 1
+
+/* Define the strings used for the special svr4 .type and .size directives.
+ These strings generally do not vary from one system running svr4 to
+ another, but if a given system (e.g. m88k running svr) needs to use
+ different pseudo-op names for these, they may be overridden in the
+ file which includes this one. */
+
+#undef STRING_ASM_OP
+#define STRING_ASM_OP "\t.stringz\t"
+
+#define TEXT_SECTION_ASM_OP "\t.text"
+#define DATA_SECTION_ASM_OP "\t.data"
+#define BSS_SECTION_ASM_OP "\t.section\t.bss"
+
+#define TARGET_ASM_FILE_START pa_linux_file_start
+
+/* We want local labels to start with period if made with asm_fprintf. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+/* Define these to generate the Linux/ELF/SysV style of internal
+ labels all the time - i.e. to be compatible with
+ ASM_GENERATE_INTERNAL_LABEL in <elfos.h>. Compare these with the
+ ones in pa.h and note the lack of dollar signs in these. FIXME:
+ shouldn't we fix pa.h to use ASM_GENERATE_INTERNAL_LABEL instead? */
+
+#undef ASM_OUTPUT_ADDR_VEC_ELT
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ if (TARGET_BIG_SWITCH) \
+ fprintf (FILE, "\t.word .L%d\n", VALUE); \
+ else \
+ fprintf (FILE, "\tb .L%d\n\tnop\n", VALUE)
+
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ if (TARGET_BIG_SWITCH) \
+ fprintf (FILE, "\t.word .L%d-.L%d\n", VALUE, REL); \
+ else \
+ fprintf (FILE, "\tb .L%d\n\tnop\n", VALUE)
+
+/* Use the default. */
+#undef ASM_OUTPUT_LABEL
+
+/* NOTE: (*targetm.asm_out.internal_label)() is defined for us by elfos.h, and
+ does what we want (i.e. uses colons). It must be compatible with
+ ASM_GENERATE_INTERNAL_LABEL(), so do not define it here. */
+
+/* Use the default. */
+#undef ASM_OUTPUT_INTERNAL_LABEL
+
+/* Use the default. */
+#undef TARGET_ASM_GLOBALIZE_LABEL
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP ".globl "
+
+/* FIXME: Hacked from the <elfos.h> one so that we avoid multiple
+ labels in a function declaration (since pa.c seems determined to do
+ it differently) */
+
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ } \
+ while (0)
+
+/* As well as globalizing the label, we need to encode the label
+ to ensure a plabel is generated in an indirect call. */
+
+#undef ASM_OUTPUT_EXTERNAL_LIBCALL
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \
+ do \
+ { \
+ if (!FUNCTION_NAME_P (XSTR (FUN, 0))) \
+ pa_encode_label (FUN); \
+ (*targetm.asm_out.globalize_label) (FILE, XSTR (FUN, 0)); \
+ } \
+ while (0)
+
+/* NetBSD always uses gas. */
+#undef TARGET_GAS
+#define TARGET_GAS 1
+
+/* Use long int for these type to make hppa64 compatibility easier. */
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+#if 0
+#undef TARGET_SYNC_LIBCALL
+#define TARGET_SYNC_LIBCALL 1
+#endif
+
+#if 0
+#undef TARGET_SYNC_LIBCALL
+#define TARGET_SYNC_LIBCALL 1
+#endif
diff -rNU3 dist.orig/gcc/config/pa/pa.c dist/gcc/config/pa/pa.c
--- dist.orig/gcc/config/pa/pa.c 2015-06-12 01:26:24.000000000 +0200
+++ dist/gcc/config/pa/pa.c 2015-10-18 13:19:50.000000000 +0200
@@ -1066,9 +1066,9 @@
|| GET_CODE (XEXP (x, 0)) == REG))
{
rtx int_part, ptr_reg;
- int newoffset;
- int offset = INTVAL (XEXP (x, 1));
- int mask;
+ HOST_WIDE_INT newoffset;
+ HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
+ HOST_WIDE_INT mask;
mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
&& !INT14_OK_STRICT ? 0x1f : 0x3fff);
@@ -1122,7 +1122,7 @@
|| GET_CODE (XEXP (x, 1)) == SUBREG)
&& GET_CODE (XEXP (x, 1)) != CONST)
{
- int val = INTVAL (XEXP (XEXP (x, 0), 1));
+ HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
rtx reg1, reg2;
reg1 = XEXP (x, 1);
@@ -1199,7 +1199,7 @@
&& INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
{
/* Divide the CONST_INT by the scale factor, then add it to A. */
- int val = INTVAL (XEXP (idx, 1));
+ HOST_WIDE_INT val = INTVAL (XEXP (idx, 1));
val /= INTVAL (XEXP (XEXP (idx, 0), 1));
reg1 = XEXP (XEXP (idx, 0), 0);
@@ -1222,7 +1222,7 @@
&& INTVAL (XEXP (idx, 1)) <= 4096
&& INTVAL (XEXP (idx, 1)) >= -4096)
{
- int val = INTVAL (XEXP (XEXP (idx, 0), 1));
+ HOST_WIDE_INT val = INTVAL (XEXP (XEXP (idx, 0), 1));
rtx reg1, reg2;
reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));
@@ -1306,7 +1306,7 @@
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
&& pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
{
- int val = INTVAL (XEXP (XEXP (x, 0), 1));
+ HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
rtx reg1, reg2;
reg1 = XEXP (x, 1);
@@ -2779,8 +2779,8 @@
const char *
pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
- int align = INTVAL (operands[5]);
- unsigned long n_bytes = INTVAL (operands[4]);
+ HOST_WIDE_INT align = INTVAL (operands[5]);
+ unsigned HOST_WIDE_INT n_bytes = INTVAL (operands[4]);
/* We can't move more than a word at a time because the PA
has no longer integer move insns. (Could use fp mem ops?) */
@@ -2907,8 +2907,8 @@
compute_movmem_length (rtx insn)
{
rtx pat = PATTERN (insn);
- unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
- unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
+ unsigned HOST_WIDE_INT align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
+ unsigned HOST_WIDE_INT n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
unsigned int n_insns = 0;
/* We can't move more than four bytes at a time because the PA
@@ -2943,8 +2943,8 @@
const char *
pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
- int align = INTVAL (operands[3]);
- unsigned long n_bytes = INTVAL (operands[2]);
+ HOST_WIDE_INT align = INTVAL (operands[3]);
+ unsigned HOST_WIDE_INT n_bytes = INTVAL (operands[2]);
/* We can't clear more than a word at a time because the PA
has no longer integer move insns. */
@@ -3049,8 +3049,8 @@
compute_clrmem_length (rtx insn)
{
rtx pat = PATTERN (insn);
- unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
- unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
+ unsigned HOST_WIDE_INT align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
+ unsigned HOST_WIDE_INT n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
unsigned int n_insns = 0;
/* We can't clear more than a word at a time because the PA
@@ -5562,7 +5562,7 @@
static void
pa_linux_file_start (void)
{
- pa_file_start_file (1);
+ pa_file_start_file (0);
pa_file_start_level ();
pa_file_start_mcount ("CODE");
}
@@ -5787,7 +5787,7 @@
const char *
pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
{
- int divisor;
+ HOST_WIDE_INT divisor;
/* If the divisor is a constant, try to use one of the special
opcodes .*/
diff -rNU3 dist.orig/gcc/config/pa/pa32-netbsd.h dist/gcc/config/pa/pa32-netbsd.h
--- dist.orig/gcc/config/pa/pa32-netbsd.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/pa/pa32-netbsd.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,37 @@
+/* Definitions for PA_RISC with ELF-32 format
+ Copyright (C) 2000, 2002 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Turn off various SOM crap we don't want. */
+#undef TARGET_ELF32
+#define TARGET_ELF32 1
+
+/* The libcall __canonicalize_funcptr_for_compare is referenced in
+ crtend.o and the reference isn't resolved in objects that don't
+ compare function pointers. Thus, we need to play games to provide
+ a reference in crtbegin.o. The rest of the define is the same
+ as that in crtstuff.c */
+#define CTOR_LIST_BEGIN \
+ asm (".type __canonicalize_funcptr_for_compare,@function\n" \
+" .text\n" \
+" .word __canonicalize_funcptr_for_compare-$PIC_pcrel$0"); \
+ STATIC func_ptr __CTOR_LIST__[1] \
+ __attribute__ ((__unused__, section(".ctors"), \
+ aligned(sizeof(func_ptr)))) \
+ = { (func_ptr) (-1) }
diff -rNU3 dist.orig/gcc/config/pa/t-netbsd dist/gcc/config/pa/t-netbsd
--- dist.orig/gcc/config/pa/t-netbsd 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/pa/t-netbsd 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1 @@
+#MULTIARCH_DIRNAME = $(call if_multiarch,hppa-linux-gnu)
diff -rNU3 dist.orig/gcc/config/riscv/constraints.md dist/gcc/config/riscv/constraints.md
--- dist.orig/gcc/config/riscv/constraints.md 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/constraints.md 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,90 @@
+;; Constraint definitions for RISC-V target.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+;; Based on MIPS target for GNU compiler.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Register constraints
+
+(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
+ "A floating-point register (if available).")
+
+(define_register_constraint "b" "ALL_REGS"
+ "@internal")
+
+(define_register_constraint "j" "T_REGS"
+ "@internal")
+
+;; Integer constraints
+
+(define_constraint "Z"
+ "@internal"
+ (and (match_code "const_int")
+ (match_test "1")))
+
+(define_constraint "I"
+ "An I-type 12-bit signed immediate."
+ (and (match_code "const_int")
+ (match_test "SMALL_OPERAND (ival)")))
+
+(define_constraint "J"
+ "Integer zero."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+;; Floating-point constraints
+
+(define_constraint "G"
+ "Floating-point zero."
+ (and (match_code "const_double")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; General constraints
+
+(define_constraint "Q"
+ "@internal"
+ (match_operand 0 "const_arith_operand"))
+
+(define_memory_constraint "A"
+ "An address that is held in a general-purpose register."
+ (and (match_code "mem")
+ (match_test "GET_CODE(XEXP(op,0)) == REG")))
+
+(define_constraint "S"
+ "@internal
+ A constant call address."
+ (and (match_operand 0 "call_insn_operand")
+ (match_test "CONSTANT_P (op)")))
+
+(define_constraint "T"
+ "@internal
+ A constant @code{move_operand}."
+ (and (match_operand 0 "move_operand")
+ (match_test "CONSTANT_P (op)")))
+
+(define_memory_constraint "W"
+ "@internal
+ A memory address based on a member of @code{BASE_REG_CLASS}."
+ (and (match_code "mem")
+ (match_operand 0 "memory_operand")))
+
+(define_constraint "YG"
+ "@internal
+ A vector zero."
+ (and (match_code "const_vector")
+ (match_test "op == CONST0_RTX (mode)")))
diff -rNU3 dist.orig/gcc/config/riscv/default-32.h dist/gcc/config/riscv/default-32.h
--- dist.orig/gcc/config/riscv/default-32.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/default-32.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,22 @@
+/* Definitions of target machine for GCC, for RISC-V,
+ defaulting to 32-bit code generation.
+
+ Copyright (C) 1999-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#define TARGET_64BIT_DEFAULT 0
diff -rNU3 dist.orig/gcc/config/riscv/elf.h dist/gcc/config/riscv/elf.h
--- dist.orig/gcc/config/riscv/elf.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/elf.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,31 @@
+/* Target macros for riscv*-elf targets.
+ Copyright (C) 1994, 1997, 1999, 2000, 2002, 2003, 2004, 2007, 2010
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Leave the linker script to choose the appropriate libraries. */
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s"
+
+#define NO_IMPLICIT_EXTERN_C 1
diff -rNU3 dist.orig/gcc/config/riscv/generic.md dist/gcc/config/riscv/generic.md
--- dist.orig/gcc/config/riscv/generic.md 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/generic.md 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,98 @@
+;; Generic DFA-based pipeline description for RISC-V targets.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+;; Based on MIPS target for GNU compiler.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This file is derived from the old define_function_unit description.
+;; Each reservation can be overridden on a processor-by-processor basis.
+
+(define_insn_reservation "generic_alu" 1
+ (eq_attr "type" "unknown,const,arith,shift,slt,multi,nop,logical,move")
+ "alu")
+
+(define_insn_reservation "generic_load" 3
+ (eq_attr "type" "load,fpload,fpidxload")
+ "alu")
+
+(define_insn_reservation "generic_store" 1
+ (eq_attr "type" "store,fpstore,fpidxstore")
+ "alu")
+
+(define_insn_reservation "generic_xfer" 2
+ (eq_attr "type" "mfc,mtc")
+ "alu")
+
+(define_insn_reservation "generic_branch" 1
+ (eq_attr "type" "branch,jump,call")
+ "alu")
+
+(define_insn_reservation "generic_imul" 17
+ (eq_attr "type" "imul")
+ "imuldiv*17")
+
+(define_insn_reservation "generic_idiv" 38
+ (eq_attr "type" "idiv")
+ "imuldiv*38")
+
+(define_insn_reservation "generic_fcvt" 1
+ (eq_attr "type" "fcvt")
+ "alu")
+
+(define_insn_reservation "generic_fmove" 2
+ (eq_attr "type" "fmove")
+ "alu")
+
+(define_insn_reservation "generic_fcmp" 3
+ (eq_attr "type" "fcmp")
+ "alu")
+
+(define_insn_reservation "generic_fadd" 4
+ (eq_attr "type" "fadd")
+ "alu")
+
+(define_insn_reservation "generic_fmul_single" 7
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF"))
+ "alu")
+
+(define_insn_reservation "generic_fmul_double" 8
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF"))
+ "alu")
+
+(define_insn_reservation "generic_fdiv_single" 23
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "mode" "SF"))
+ "alu")
+
+(define_insn_reservation "generic_fdiv_double" 36
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "mode" "DF"))
+ "alu")
+
+(define_insn_reservation "generic_fsqrt_single" 54
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "mode" "SF"))
+ "alu")
+
+(define_insn_reservation "generic_fsqrt_double" 112
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "mode" "DF"))
+ "alu")
diff -rNU3 dist.orig/gcc/config/riscv/linux-unwind.h dist/gcc/config/riscv/linux-unwind.h
--- dist.orig/gcc/config/riscv/linux-unwind.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/linux-unwind.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,50 @@
+/* DWARF2 EH unwinding support for RISC-V Linux.
+ Copyright (C) 2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef inhibit_libc
+/* Examine the code and attempt to identify a signal frame. */
+
+#include <signal.h>
+#include <asm/unistd.h>
+
+#define MD_FALLBACK_FRAME_STATE_FOR riscv_fallback_frame_state
+
+static _Unwind_Reason_Code
+riscv_fallback_frame_state (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ unsigned int *pc = (unsigned int *) context->ra;
+
+ /* Signal frames begin with the following code sequence:
+ li v0, __NR_rt_sigreturn
+ scall */
+ if (((unsigned long)pc & 0x3) != 0
+ || pc[0] != RISCV_ITYPE (ADDI, GP_RETURN, 0, __NR_rt_sigreturn)
+ || pc[1] != RISCV_ITYPE (SCALL, 0, 0, 0))
+ return _URC_END_OF_STACK;
+
+ /* TODO: Actually implement this. */
+ abort();
+}
+#endif
diff -rNU3 dist.orig/gcc/config/riscv/linux.h dist/gcc/config/riscv/linux.h
--- dist.orig/gcc/config/riscv/linux.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/linux.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,60 @@
+/* Definitions for RISC-V GNU/Linux systems with ELF format.
+ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
+ 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
+ /* The GNU C++ standard library requires this. */ \
+ if (c_dialect_cxx ()) \
+ builtin_define ("_GNU_SOURCE"); \
+ } while (0)
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
+
+/* Borrowed from sparc/linux.h */
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{shared:-shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
+ %{static:-static}}"
+
+#undef LIB_SPEC
+#define LIB_SPEC "\
+%{pthread:-lpthread} \
+%{shared:-lc} \
+%{!shared: \
+ %{profile:-lc_p} %{!profile:-lc}}"
+
+/* Similar to standard Linux, but adding -ffast-math support. */
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
diff -rNU3 dist.orig/gcc/config/riscv/linux64.h dist/gcc/config/riscv/linux64.h
--- dist.orig/gcc/config/riscv/linux64.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/linux64.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,43 @@
+/* Definitions for 64-bit RISC-V GNU/Linux systems with ELF format.
+ Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Force the default ABI flags onto the command line
+ in order to make the other specs easier to write. */
+#undef LIB_SPEC
+#define LIB_SPEC "\
+%{pthread:-lpthread} \
+%{shared:-lc} \
+%{!shared: \
+ %{profile:-lc_p} %{!profile:-lc}}"
+
+#define GLIBC_DYNAMIC_LINKER32 "/lib32/ld.so.1"
+#define GLIBC_DYNAMIC_LINKER64 "/lib/ld.so.1"
+
+#undef LINK_SPEC
+#define LINK_SPEC "\
+%{shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{" OPT_ARCH64 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER64 "} \
+ %{" OPT_ARCH32 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER32 "}} \
+ %{static:-static}} \
+%{" OPT_ARCH64 ":-melf64lriscv} \
+%{" OPT_ARCH32 ":-melf32lriscv}"
diff -rNU3 dist.orig/gcc/config/riscv/netbsd.h dist/gcc/config/riscv/netbsd.h
--- dist.orig/gcc/config/riscv/netbsd.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/netbsd.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,114 @@
+/* Definitions for RISCV running NetBSD systems using ELF
+ Copyright (C) 2014
+ Free Software Foundation, Inc.
+ Contributed by Matt Thomas
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#undef TARGET_USE_GP
+#define TARGET_USE_GP 0
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS ""
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_FDIV)
+
+#undef TARGET_DEFAULT_CMODEL
+#define TARGET_DEFAULT_CMODEL CM_MEDANY
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ NETBSD_OS_CPP_BUILTINS_ELF(); \
+ /* The GNU C++ standard library requires this. */ \
+ if (c_dialect_cxx ()) \
+ builtin_define ("_GNU_SOURCE"); \
+ if (!TARGET_HARD_FLOAT_ABI) \
+ builtin_define ("_SOFT_FLOAT"); \
+ } while (0)
+
+#undef CPP_SPEC
+#define CPP_SPEC NETBSD_CPP_SPEC
+
+#undef LIB_SPEC
+#define LIB_SPEC NETBSD_LIB_SPEC
+
+#undef LINK_SPEC
+#define LINK_SPEC NETBSD_LINK_SPEC_ELF
+/* Provide a LINK_SPEC appropriate for a NetBSD/mips target.
+ This is a copy of LINK_SPEC from tweaked for
+ the MIPS target. */
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{m64:-m elf64lriscv} \
+ %{m32:-m elf32lriscv} \
+ %(netbsd_link_spec)"
+
+#undef NETBSD_ENTRY_POINT
+#define NETBSD_ENTRY_POINT "_start"
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "netbsd_link_spec", NETBSD_LINK_SPEC_ELF }, \
+ { "netbsd_entry_point", NETBSD_ENTRY_POINT }, \
+ { "netbsd_endfile_spec", NETBSD_ENDFILE_SPEC },
+
+#define SIG_ATOMIC_TYPE "int"
+
+#define INT8_TYPE "signed char"
+#define INT16_TYPE "short int"
+#define INT32_TYPE "int"
+#define INT64_TYPE "long long int"
+#define UINT8_TYPE "unsigned char"
+#define UINT16_TYPE "short unsigned int"
+#define UINT32_TYPE "unsigned int"
+#define UINT64_TYPE "long long unsigned int"
+
+#define INT_LEAST8_TYPE "signed char"
+#define INT_LEAST16_TYPE "short int"
+#define INT_LEAST32_TYPE "int"
+#define INT_LEAST64_TYPE "long long int"
+#define UINT_LEAST8_TYPE "unsigned char"
+#define UINT_LEAST16_TYPE "short unsigned int"
+#define UINT_LEAST32_TYPE "unsigned int"
+#define UINT_LEAST64_TYPE "long long unsigned int"
+
+#define INT_FAST8_TYPE "signed char"
+#define INT_FAST16_TYPE "short int"
+#define INT_FAST32_TYPE "int"
+#define INT_FAST64_TYPE "long long int"
+#define UINT_FAST8_TYPE "unsigned char"
+#define UINT_FAST16_TYPE "short unsigned int"
+#define UINT_FAST32_TYPE "unsigned int"
+#define UINT_FAST64_TYPE "long long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#define INTPTR_TYPE PTRDIFF_TYPE
+#define UINTPTR_TYPE SIZE_TYPE
+
+#undef INTMAX_TYPE
+#define INTMAX_TYPE "long long int"
+
+#undef UINTMAX_TYPE
+#define UINTMAX_TYPE "long long unsigned int"
diff -rNU3 dist.orig/gcc/config/riscv/opcode-riscv.h dist/gcc/config/riscv/opcode-riscv.h
--- dist.orig/gcc/config/riscv/opcode-riscv.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/opcode-riscv.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,149 @@
+/* RISC-V ISA encoding.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+ Based on MIPS target for GNU compiler.
+
+This file is part of GDB, GAS, and the GNU binutils.
+
+GDB, GAS, and the GNU binutils are free software; you can redistribute
+them and/or modify them under the terms of the GNU General Public
+License as published by the Free Software Foundation; either version
+1, or (at your option) any later version.
+
+GDB, GAS, and the GNU binutils are distributed in the hope that they
+will be useful, but WITHOUT ANY WARRANTY; without even the implied
+warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this file; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef _RISCV_H_
+#define _RISCV_H_
+
+#define RV_X(x, s, n) (((x) >> (s)) & ((1<<(n))-1))
+#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
+
+#define EXTRACT_ITYPE_IMM(x) \
+ (RV_X(x, 20, 12) | (RV_IMM_SIGN(x) << 12))
+#define EXTRACT_STYPE_IMM(x) \
+ (RV_X(x, 7, 5) | (RV_X(x, 25, 7) << 5) | (RV_IMM_SIGN(x) << 12))
+#define EXTRACT_SBTYPE_IMM(x) \
+ ((RV_X(x, 8, 4) << 1) | (RV_X(x, 25, 6) << 5) | (RV_X(x, 7, 1) << 11) | (RV_IMM_SIGN(x) << 12))
+#define EXTRACT_UTYPE_IMM(x) \
+ ((RV_X(x, 12, 20) << 20) | (RV_IMM_SIGN(x) << 32))
+#define EXTRACT_UJTYPE_IMM(x) \
+ ((RV_X(x, 21, 10) << 1) | (RV_X(x, 20, 1) << 11) | (RV_X(x, 12, 8) << 12) | (RV_IMM_SIGN(x) << 20))
+
+#define ENCODE_ITYPE_IMM(x) \
+ (RV_X(x, 0, 12) << 20)
+#define ENCODE_STYPE_IMM(x) \
+ ((RV_X(x, 0, 5) << 7) | (RV_X(x, 5, 7) << 25))
+#define ENCODE_SBTYPE_IMM(x) \
+ ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))
+#define ENCODE_UTYPE_IMM(x) \
+ (RV_X(x, 12, 20) << 12)
+#define ENCODE_UJTYPE_IMM(x) \
+ ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))
+
+#define VALID_ITYPE_IMM(x) (EXTRACT_ITYPE_IMM(ENCODE_ITYPE_IMM(x)) == (x))
+#define VALID_STYPE_IMM(x) (EXTRACT_STYPE_IMM(ENCODE_STYPE_IMM(x)) == (x))
+#define VALID_SBTYPE_IMM(x) (EXTRACT_SBTYPE_IMM(ENCODE_SBTYPE_IMM(x)) == (x))
+#define VALID_UTYPE_IMM(x) (EXTRACT_UTYPE_IMM(ENCODE_UTYPE_IMM(x)) == (x))
+#define VALID_UJTYPE_IMM(x) (EXTRACT_UJTYPE_IMM(ENCODE_UJTYPE_IMM(x)) == (x))
+
+#define RISCV_RTYPE(insn, rd, rs1, rs2) \
+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2))
+#define RISCV_ITYPE(insn, rd, rs1, imm) \
+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ENCODE_ITYPE_IMM(imm))
+#define RISCV_STYPE(insn, rs1, rs2, imm) \
+ ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_STYPE_IMM(imm))
+#define RISCV_SBTYPE(insn, rs1, rs2, target) \
+ ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_SBTYPE_IMM(target))
+#define RISCV_UTYPE(insn, rd, bigimm) \
+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UTYPE_IMM(bigimm))
+#define RISCV_UJTYPE(insn, rd, target) \
+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UJTYPE_IMM(target))
+
+#define RISCV_NOP RISCV_ITYPE(ADDI, 0, 0, 0)
+
+#define RISCV_CONST_HIGH_PART(VALUE) \
+ (((VALUE) + (RISCV_IMM_REACH/2)) & ~(RISCV_IMM_REACH-1))
+#define RISCV_CONST_LOW_PART(VALUE) ((VALUE) - RISCV_CONST_HIGH_PART (VALUE))
+
+/* RV fields */
+
+#define OP_MASK_OP 0x7f
+#define OP_SH_OP 0
+#define OP_MASK_RS2 0x1f
+#define OP_SH_RS2 20
+#define OP_MASK_RS1 0x1f
+#define OP_SH_RS1 15
+#define OP_MASK_RS3 0x1f
+#define OP_SH_RS3 27
+#define OP_MASK_RD 0x1f
+#define OP_SH_RD 7
+#define OP_MASK_SHAMT 0x3f
+#define OP_SH_SHAMT 20
+#define OP_MASK_SHAMTW 0x1f
+#define OP_SH_SHAMTW 20
+#define OP_MASK_RM 0x7
+#define OP_SH_RM 12
+#define OP_MASK_PRED 0xf
+#define OP_SH_PRED 24
+#define OP_MASK_SUCC 0xf
+#define OP_SH_SUCC 20
+#define OP_MASK_AQ 0x1
+#define OP_SH_AQ 26
+#define OP_MASK_RL 0x1
+#define OP_SH_RL 25
+
+#define OP_MASK_VRD 0x1f
+#define OP_SH_VRD 7
+#define OP_MASK_VRS 0x1f
+#define OP_SH_VRS 15
+#define OP_MASK_VRT 0x1f
+#define OP_SH_VRT 20
+#define OP_MASK_VRR 0x1f
+#define OP_SH_VRR 25
+
+#define OP_MASK_VFD 0x1f
+#define OP_SH_VFD 7
+#define OP_MASK_VFS 0x1f
+#define OP_SH_VFS 15
+#define OP_MASK_VFT 0x1f
+#define OP_SH_VFT 20
+#define OP_MASK_VFR 0x1f
+#define OP_SH_VFR 25
+
+#define OP_MASK_IMMNGPR 0x3f
+#define OP_SH_IMMNGPR 20
+#define OP_MASK_IMMNFPR 0x3f
+#define OP_SH_IMMNFPR 26
+#define OP_MASK_IMMSEGNELM 0x1f
+#define OP_SH_IMMSEGNELM 17
+#define OP_MASK_IMMSEGSTNELM 0x1f
+#define OP_SH_IMMSEGSTNELM 12
+#define OP_MASK_CUSTOM_IMM 0x7f
+#define OP_SH_CUSTOM_IMM 25
+
+#define LINK_REG 1
+
+#define RISCV_JUMP_BITS RISCV_BIGIMM_BITS
+#define RISCV_JUMP_ALIGN_BITS 1
+#define RISCV_JUMP_ALIGN (1 << RISCV_JUMP_ALIGN_BITS)
+#define RISCV_JUMP_REACH ((1ULL< SI optimizations
+;;........................
+
+;; Simplify (int)(a + 1), etc.
+(define_peephole2
+ [(set (match_operand:DI 0 "register_operand")
+ (match_operator:DI 4 "modular_operator"
+ [(match_operand:DI 1 "register_operand")
+ (match_operand:DI 2 "arith_operand")]))
+ (set (match_operand:SI 3 "register_operand")
+ (truncate:SI (match_dup 0)))]
+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))
+ && (GET_CODE (operands[4]) != ASHIFT || (CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 32))"
+ [(set (match_dup 3)
+ (truncate:SI
+ (match_op_dup:DI 4
+ [(match_operand:DI 1 "register_operand")
+ (match_operand:DI 2 "arith_operand")])))])
+
+;; Simplify (int)a + 1, etc.
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (truncate:SI (match_operand:DI 1 "register_operand")))
+ (set (match_operand:SI 3 "register_operand")
+ (match_operator:SI 4 "modular_operator"
+ [(match_dup 0)
+ (match_operand:SI 2 "arith_operand")]))]
+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
+ [(set (match_dup 3)
+ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
+
+;; Simplify -(int)a, etc.
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (truncate:SI (match_operand:DI 2 "register_operand")))
+ (set (match_operand:SI 3 "register_operand")
+ (match_operator:SI 4 "modular_operator"
+ [(match_operand:SI 1 "reg_or_0_operand")
+ (match_dup 0)]))]
+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
+ [(set (match_dup 3)
+ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
+
+;; Simplify PIC loads to static variables.
+;; These will go away once we figure out how to emit auipc discretely.
+(define_insn "*local_pic_load"
+ [(set (match_operand:ANYI 0 "register_operand" "=r")
+ (mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))]
+ "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
+ "\t%0,%1"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_load"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
+ (clobber (match_scratch:DI 2 "=&r"))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
+ "\t%0,%1,%2"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_load"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "TARGET_HARD_FLOAT && !TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
+ "\t%0,%1,%2"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_loadu"
+ [(set (match_operand:SUPERQI 0 "register_operand" "=r")
+ (zero_extend:SUPERQI (mem:SUBDI (match_operand 1 "absolute_symbolic_operand" ""))))]
+ "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
+ "u\t%0,%1"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_storedi"
+ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
+ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
+ (clobber (match_scratch:DI 2 "=&r"))]
+ "TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
+ "\t%z1,%0,%2"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_storesi"
+ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
+ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "!TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
+ "\t%z1,%0,%2"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_storedi"
+ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
+ (match_operand:ANYF 1 "register_operand" "f"))
+ (clobber (match_scratch:DI 2 "=&r"))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
+ "\t%1,%0,%2"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_storesi"
+ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
+ (match_operand:ANYF 1 "register_operand" "f"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "TARGET_HARD_FLOAT && !TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
+ "\t%1,%0,%2"
+ [(set (attr "length") (const_int 8))])
diff -rNU3 dist.orig/gcc/config/riscv/predicates.md dist/gcc/config/riscv/predicates.md
--- dist.orig/gcc/config/riscv/predicates.md 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/predicates.md 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,182 @@
+;; Predicate description for RISC-V target.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+;; Based on MIPS target for GNU compiler.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; .
+
+(define_predicate "const_arith_operand"
+ (and (match_code "const_int")
+ (match_test "SMALL_OPERAND (INTVAL (op))")))
+
+(define_predicate "arith_operand"
+ (ior (match_operand 0 "const_arith_operand")
+ (match_operand 0 "register_operand")))
+
+(define_predicate "sle_operand"
+ (and (match_code "const_int")
+ (match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
+
+(define_predicate "sleu_operand"
+ (and (match_operand 0 "sle_operand")
+ (match_test "INTVAL (op) + 1 != 0")))
+
+(define_predicate "const_0_operand"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op == CONST0_RTX (GET_MODE (op))")))
+
+(define_predicate "reg_or_0_operand"
+ (ior (match_operand 0 "const_0_operand")
+ (match_operand 0 "register_operand")))
+
+(define_predicate "const_1_operand"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op == CONST1_RTX (GET_MODE (op))")))
+
+(define_predicate "reg_or_1_operand"
+ (ior (match_operand 0 "const_1_operand")
+ (match_operand 0 "register_operand")))
+
+;; This is used for indexing into vectors, and hence only accepts const_int.
+(define_predicate "const_0_or_1_operand"
+ (and (match_code "const_int")
+ (ior (match_test "op == CONST0_RTX (GET_MODE (op))")
+ (match_test "op == CONST1_RTX (GET_MODE (op))"))))
+
+(define_special_predicate "pc_or_label_operand"
+ (match_code "pc,label_ref"))
+
+;; A legitimate CONST_INT operand that takes more than one instruction
+;; to load.
+(define_predicate "splittable_const_int_operand"
+ (match_code "const_int")
+{
+ /* Don't handle multi-word moves this way; we don't want to introduce
+ the individual word-mode moves until after reload. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return false;
+
+ /* Otherwise check whether the constant can be loaded in a single
+ instruction. */
+ return !LUI_INT (op) && !SMALL_INT (op);
+})
+
+(define_predicate "move_operand"
+ (match_operand 0 "general_operand")
+{
+ enum riscv_symbol_type symbol_type;
+
+ /* The thinking here is as follows:
+
+ (1) The move expanders should split complex load sequences into
+ individual instructions. Those individual instructions can
+ then be optimized by all rtl passes.
+
+ (2) The target of pre-reload load sequences should not be used
+ to store temporary results. If the target register is only
+ assigned one value, reload can rematerialize that value
+ on demand, rather than spill it to the stack.
+
+ (3) If we allowed pre-reload passes like combine and cse to recreate
+ complex load sequences, we would want to be able to split the
+ sequences before reload as well, so that the pre-reload scheduler
+ can see the individual instructions. This falls foul of (2);
+ the splitter would be forced to reuse the target register for
+ intermediate results.
+
+ (4) We want to define complex load splitters for combine. These
+ splitters can request a temporary scratch register, which avoids
+ the problem in (2). They allow things like:
+
+ (set (reg T1) (high SYM))
+ (set (reg T2) (low (reg T1) SYM))
+ (set (reg X) (plus (reg T2) (const_int OFFSET)))
+
+ to be combined into:
+
+ (set (reg T3) (high SYM+OFFSET))
+ (set (reg X) (lo_sum (reg T3) SYM+OFFSET))
+
+ if T2 is only used this once. */
+ switch (GET_CODE (op))
+ {
+ case CONST_INT:
+ return !splittable_const_int_operand (op, mode);
+
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return (riscv_symbolic_constant_p (op, &symbol_type)
+ && !riscv_hi_relocs[symbol_type]);
+
+ case HIGH:
+ op = XEXP (op, 0);
+ return riscv_symbolic_constant_p (op, &symbol_type);
+
+ default:
+ return true;
+ }
+})
+
+(define_predicate "consttable_operand"
+ (match_test "CONSTANT_P (op)"))
+
+(define_predicate "symbolic_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ enum riscv_symbol_type type;
+ return riscv_symbolic_constant_p (op, &type);
+})
+
+(define_predicate "absolute_symbolic_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ enum riscv_symbol_type type;
+ return (riscv_symbolic_constant_p (op, &type)
+ && type == SYMBOL_ABSOLUTE);
+})
+
+(define_predicate "plt_symbolic_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ enum riscv_symbol_type type;
+ return (riscv_symbolic_constant_p (op, &type)
+ && type == SYMBOL_GOT_DISP && !SYMBOL_REF_WEAK (op) && TARGET_PLT);
+})
+
+(define_predicate "call_insn_operand"
+ (ior (match_operand 0 "absolute_symbolic_operand")
+ (match_operand 0 "plt_symbolic_operand")
+ (match_operand 0 "register_operand")))
+
+(define_predicate "symbol_ref_operand"
+ (match_code "symbol_ref"))
+
+(define_predicate "modular_operator"
+ (match_code "plus,minus,mult,ashift"))
+
+(define_predicate "equality_operator"
+ (match_code "eq,ne"))
+
+(define_predicate "order_operator"
+ (match_code "eq,ne,lt,ltu,le,leu,ge,geu,gt,gtu"))
+
+(define_predicate "fp_order_operator"
+ (match_code "eq,lt,le,gt,ge"))
+
+(define_predicate "fp_unorder_operator"
+ (match_code "ordered,unordered"))
diff -rNU3 dist.orig/gcc/config/riscv/riscv-ftypes.def dist/gcc/config/riscv/riscv-ftypes.def
--- dist.orig/gcc/config/riscv/riscv-ftypes.def 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/riscv-ftypes.def 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,39 @@
+/* Definitions of prototypes for RISC-V built-in functions.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+ Based on MIPS target for GNU compiler.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+. */
+
+/* Invoke DEF_RISCV_FTYPE (NARGS, LIST) for each prototype used by
+ MIPS built-in functions, where:
+
+ NARGS is the number of arguments.
+ LIST contains the return-type code followed by the codes for each
+ argument type.
+
+ Argument- and return-type codes are either modes or one of the following:
+
+ VOID for void_type_node
+ INT for integer_type_node
+ POINTER for ptr_type_node
+
+ (we don't use PTR because that's a ANSI-compatibillity macro).
+
+ Please keep this list lexicographically sorted by the LIST argument. */
+
+DEF_RISCV_FTYPE (1, (VOID, VOID))
diff -rNU3 dist.orig/gcc/config/riscv/riscv-modes.def dist/gcc/config/riscv/riscv-modes.def
--- dist.orig/gcc/config/riscv/riscv-modes.def 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/riscv-modes.def 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,26 @@
+/* Extra machine modes for RISC-V target.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+ Based on MIPS target for GNU compiler.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+. */
+
+FLOAT_MODE (TF, 16, ieee_quad_format);
+
+/* Vector modes. */
+VECTOR_MODES (INT, 4); /* V8QI V4HI V2SI */
+VECTOR_MODES (FLOAT, 4); /* V4HF V2SF */
diff -rNU3 dist.orig/gcc/config/riscv/riscv-opc.h dist/gcc/config/riscv/riscv-opc.h
--- dist.orig/gcc/config/riscv/riscv-opc.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/riscv-opc.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,1195 @@
+/* Automatically generated by parse-opcodes */
+#ifndef RISCV_ENCODING_H
+#define RISCV_ENCODING_H
+#define MATCH_ADD 0x33
+#define MASK_ADD 0xfe00707f
+#define MATCH_ADDI 0x13
+#define MASK_ADDI 0x707f
+#define MATCH_ADDIW 0x1b
+#define MASK_ADDIW 0x707f
+#define MATCH_ADDW 0x3b
+#define MASK_ADDW 0xfe00707f
+#define MATCH_AMOADD_D 0x302f
+#define MASK_AMOADD_D 0xf800707f
+#define MATCH_AMOADD_W 0x202f
+#define MASK_AMOADD_W 0xf800707f
+#define MATCH_AMOAND_D 0x6000302f
+#define MASK_AMOAND_D 0xf800707f
+#define MATCH_AMOAND_W 0x6000202f
+#define MASK_AMOAND_W 0xf800707f
+#define MATCH_AMOMAX_D 0xa000302f
+#define MASK_AMOMAX_D 0xf800707f
+#define MATCH_AMOMAX_W 0xa000202f
+#define MASK_AMOMAX_W 0xf800707f
+#define MATCH_AMOMAXU_D 0xe000302f
+#define MASK_AMOMAXU_D 0xf800707f
+#define MATCH_AMOMAXU_W 0xe000202f
+#define MASK_AMOMAXU_W 0xf800707f
+#define MATCH_AMOMIN_D 0x8000302f
+#define MASK_AMOMIN_D 0xf800707f
+#define MATCH_AMOMIN_W 0x8000202f
+#define MASK_AMOMIN_W 0xf800707f
+#define MATCH_AMOMINU_D 0xc000302f
+#define MASK_AMOMINU_D 0xf800707f
+#define MATCH_AMOMINU_W 0xc000202f
+#define MASK_AMOMINU_W 0xf800707f
+#define MATCH_AMOOR_D 0x4000302f
+#define MASK_AMOOR_D 0xf800707f
+#define MATCH_AMOOR_W 0x4000202f
+#define MASK_AMOOR_W 0xf800707f
+#define MATCH_AMOSWAP_D 0x800302f
+#define MASK_AMOSWAP_D 0xf800707f
+#define MATCH_AMOSWAP_W 0x800202f
+#define MASK_AMOSWAP_W 0xf800707f
+#define MATCH_AMOXOR_D 0x2000302f
+#define MASK_AMOXOR_D 0xf800707f
+#define MATCH_AMOXOR_W 0x2000202f
+#define MASK_AMOXOR_W 0xf800707f
+#define MATCH_AND 0x7033
+#define MASK_AND 0xfe00707f
+#define MATCH_ANDI 0x7013
+#define MASK_ANDI 0x707f
+#define MATCH_AUIPC 0x17
+#define MASK_AUIPC 0x7f
+#define MATCH_BEQ 0x63
+#define MASK_BEQ 0x707f
+#define MATCH_BGE 0x5063
+#define MASK_BGE 0x707f
+#define MATCH_BGEU 0x7063
+#define MASK_BGEU 0x707f
+#define MATCH_BLT 0x4063
+#define MASK_BLT 0x707f
+#define MATCH_BLTU 0x6063
+#define MASK_BLTU 0x707f
+#define MATCH_BNE 0x1063
+#define MASK_BNE 0x707f
+#define MATCH_C_ADD 0x6000
+#define MASK_C_ADD 0xf003
+#define MATCH_C_ADDI 0x8000
+#define MASK_C_ADDI 0xe003
+#define MATCH_C_ADDI4 0xa000
+#define MASK_C_ADDI4 0xe003
+#define MATCH_C_ADDIW 0xe000
+#define MASK_C_ADDIW 0xe003
+#define MATCH_C_ADDW 0x7000
+#define MASK_C_ADDW 0xf003
+#define MATCH_C_BEQZ 0x2002
+#define MASK_C_BEQZ 0xe003
+#define MATCH_C_BNEZ 0x6002
+#define MASK_C_BNEZ 0xe003
+#define MATCH_C_J 0xa002
+#define MASK_C_J 0xe003
+#define MATCH_C_JALR 0x5000
+#define MASK_C_JALR 0xf003
+#define MATCH_C_LD 0x2001
+#define MASK_C_LD 0xe003
+#define MATCH_C_LDSP 0xc001
+#define MASK_C_LDSP 0xe003
+#define MATCH_C_LI 0x0
+#define MASK_C_LI 0xe003
+#define MATCH_C_LUI 0x2000
+#define MASK_C_LUI 0xe003
+#define MATCH_C_LW 0x1
+#define MASK_C_LW 0xe003
+#define MATCH_C_LWSP 0x8001
+#define MASK_C_LWSP 0xe003
+#define MATCH_C_MV 0x4000
+#define MASK_C_MV 0xf003
+#define MATCH_C_SD 0x6001
+#define MASK_C_SD 0xe003
+#define MATCH_C_SDSP 0xe001
+#define MASK_C_SDSP 0xe003
+#define MATCH_C_SLLI 0xc000
+#define MASK_C_SLLI 0xe003
+#define MATCH_C_SW 0x4001
+#define MASK_C_SW 0xe003
+#define MATCH_C_SWSP 0xa001
+#define MASK_C_SWSP 0xe003
+#define MATCH_CSRRC 0x3073
+#define MASK_CSRRC 0x707f
+#define MATCH_CSRRCI 0x7073
+#define MASK_CSRRCI 0x707f
+#define MATCH_CSRRS 0x2073
+#define MASK_CSRRS 0x707f
+#define MATCH_CSRRSI 0x6073
+#define MASK_CSRRSI 0x707f
+#define MATCH_CSRRW 0x1073
+#define MASK_CSRRW 0x707f
+#define MATCH_CSRRWI 0x5073
+#define MASK_CSRRWI 0x707f
+#define MATCH_CUSTOM0 0xb
+#define MASK_CUSTOM0 0x707f
+#define MATCH_CUSTOM0_RD 0x400b
+#define MASK_CUSTOM0_RD 0x707f
+#define MATCH_CUSTOM0_RD_RS1 0x600b
+#define MASK_CUSTOM0_RD_RS1 0x707f
+#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b
+#define MASK_CUSTOM0_RD_RS1_RS2 0x707f
+#define MATCH_CUSTOM0_RS1 0x200b
+#define MASK_CUSTOM0_RS1 0x707f
+#define MATCH_CUSTOM0_RS1_RS2 0x300b
+#define MASK_CUSTOM0_RS1_RS2 0x707f
+#define MATCH_CUSTOM1 0x2b
+#define MASK_CUSTOM1 0x707f
+#define MATCH_CUSTOM1_RD 0x402b
+#define MASK_CUSTOM1_RD 0x707f
+#define MATCH_CUSTOM1_RD_RS1 0x602b
+#define MASK_CUSTOM1_RD_RS1 0x707f
+#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b
+#define MASK_CUSTOM1_RD_RS1_RS2 0x707f
+#define MATCH_CUSTOM1_RS1 0x202b
+#define MASK_CUSTOM1_RS1 0x707f
+#define MATCH_CUSTOM1_RS1_RS2 0x302b
+#define MASK_CUSTOM1_RS1_RS2 0x707f
+#define MATCH_CUSTOM2 0x5b
+#define MASK_CUSTOM2 0x707f
+#define MATCH_CUSTOM2_RD 0x405b
+#define MASK_CUSTOM2_RD 0x707f
+#define MATCH_CUSTOM2_RD_RS1 0x605b
+#define MASK_CUSTOM2_RD_RS1 0x707f
+#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b
+#define MASK_CUSTOM2_RD_RS1_RS2 0x707f
+#define MATCH_CUSTOM2_RS1 0x205b
+#define MASK_CUSTOM2_RS1 0x707f
+#define MATCH_CUSTOM2_RS1_RS2 0x305b
+#define MASK_CUSTOM2_RS1_RS2 0x707f
+#define MATCH_CUSTOM3 0x7b
+#define MASK_CUSTOM3 0x707f
+#define MATCH_CUSTOM3_RD 0x407b
+#define MASK_CUSTOM3_RD 0x707f
+#define MATCH_CUSTOM3_RD_RS1 0x607b
+#define MASK_CUSTOM3_RD_RS1 0x707f
+#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b
+#define MASK_CUSTOM3_RD_RS1_RS2 0x707f
+#define MATCH_CUSTOM3_RS1 0x207b
+#define MASK_CUSTOM3_RS1 0x707f
+#define MATCH_CUSTOM3_RS1_RS2 0x307b
+#define MASK_CUSTOM3_RS1_RS2 0x707f
+#define MATCH_DIV 0x2004033
+#define MASK_DIV 0xfe00707f
+#define MATCH_DIVU 0x2005033
+#define MASK_DIVU 0xfe00707f
+#define MATCH_DIVUW 0x200503b
+#define MASK_DIVUW 0xfe00707f
+#define MATCH_DIVW 0x200403b
+#define MASK_DIVW 0xfe00707f
+#define MATCH_FADD_D 0x2000053
+#define MASK_FADD_D 0xfe00007f
+#define MATCH_FADD_H 0x4000053
+#define MASK_FADD_H 0xfe00007f
+#define MATCH_FADD_S 0x53
+#define MASK_FADD_S 0xfe00007f
+#define MATCH_FCLASS_D 0xe2001053
+#define MASK_FCLASS_D 0xfff0707f
+#define MATCH_FCLASS_S 0xe0001053
+#define MASK_FCLASS_S 0xfff0707f
+#define MATCH_FCVT_D_H 0x8c000053
+#define MASK_FCVT_D_H 0xfff0007f
+#define MATCH_FCVT_D_L 0xd2200053
+#define MASK_FCVT_D_L 0xfff0007f
+#define MATCH_FCVT_D_LU 0xd2300053
+#define MASK_FCVT_D_LU 0xfff0007f
+#define MATCH_FCVT_D_S 0x42000053
+#define MASK_FCVT_D_S 0xfff0007f
+#define MATCH_FCVT_D_W 0xd2000053
+#define MASK_FCVT_D_W 0xfff0007f
+#define MATCH_FCVT_D_WU 0xd2100053
+#define MASK_FCVT_D_WU 0xfff0007f
+#define MATCH_FCVT_H_D 0x92000053
+#define MASK_FCVT_H_D 0xfff0007f
+#define MATCH_FCVT_H_L 0x64000053
+#define MASK_FCVT_H_L 0xfff0007f
+#define MATCH_FCVT_H_LU 0x6c000053
+#define MASK_FCVT_H_LU 0xfff0007f
+#define MATCH_FCVT_H_S 0x90000053
+#define MASK_FCVT_H_S 0xfff0007f
+#define MATCH_FCVT_H_W 0x74000053
+#define MASK_FCVT_H_W 0xfff0007f
+#define MATCH_FCVT_H_WU 0x7c000053
+#define MASK_FCVT_H_WU 0xfff0007f
+#define MATCH_FCVT_L_D 0xc2200053
+#define MASK_FCVT_L_D 0xfff0007f
+#define MATCH_FCVT_L_H 0x44000053
+#define MASK_FCVT_L_H 0xfff0007f
+#define MATCH_FCVT_L_S 0xc0200053
+#define MASK_FCVT_L_S 0xfff0007f
+#define MATCH_FCVT_LU_D 0xc2300053
+#define MASK_FCVT_LU_D 0xfff0007f
+#define MATCH_FCVT_LU_H 0x4c000053
+#define MASK_FCVT_LU_H 0xfff0007f
+#define MATCH_FCVT_LU_S 0xc0300053
+#define MASK_FCVT_LU_S 0xfff0007f
+#define MATCH_FCVT_S_D 0x40100053
+#define MASK_FCVT_S_D 0xfff0007f
+#define MATCH_FCVT_S_H 0x84000053
+#define MASK_FCVT_S_H 0xfff0007f
+#define MATCH_FCVT_S_L 0xd0200053
+#define MASK_FCVT_S_L 0xfff0007f
+#define MATCH_FCVT_S_LU 0xd0300053
+#define MASK_FCVT_S_LU 0xfff0007f
+#define MATCH_FCVT_S_W 0xd0000053
+#define MASK_FCVT_S_W 0xfff0007f
+#define MATCH_FCVT_S_WU 0xd0100053
+#define MASK_FCVT_S_WU 0xfff0007f
+#define MATCH_FCVT_W_D 0xc2000053
+#define MASK_FCVT_W_D 0xfff0007f
+#define MATCH_FCVT_W_H 0x54000053
+#define MASK_FCVT_W_H 0xfff0007f
+#define MATCH_FCVT_W_S 0xc0000053
+#define MASK_FCVT_W_S 0xfff0007f
+#define MATCH_FCVT_WU_D 0xc2100053
+#define MASK_FCVT_WU_D 0xfff0007f
+#define MATCH_FCVT_WU_H 0x5c000053
+#define MASK_FCVT_WU_H 0xfff0007f
+#define MATCH_FCVT_WU_S 0xc0100053
+#define MASK_FCVT_WU_S 0xfff0007f
+#define MATCH_FDIV_D 0x1a000053
+#define MASK_FDIV_D 0xfe00007f
+#define MATCH_FDIV_H 0x1c000053
+#define MASK_FDIV_H 0xfe00007f
+#define MATCH_FDIV_S 0x18000053
+#define MASK_FDIV_S 0xfe00007f
+#define MATCH_FENCE 0xf
+#define MASK_FENCE 0x707f
+#define MATCH_FENCE_I 0x100f
+#define MASK_FENCE_I 0x707f
+#define MATCH_FEQ_D 0xa2002053
+#define MASK_FEQ_D 0xfe00707f
+#define MATCH_FEQ_H 0xac000053
+#define MASK_FEQ_H 0xfe00707f
+#define MATCH_FEQ_S 0xa0002053
+#define MASK_FEQ_S 0xfe00707f
+#define MATCH_FLD 0x3007
+#define MASK_FLD 0x707f
+#define MATCH_FLE_D 0xa2000053
+#define MASK_FLE_D 0xfe00707f
+#define MATCH_FLE_H 0xbc000053
+#define MASK_FLE_H 0xfe00707f
+#define MATCH_FLE_S 0xa0000053
+#define MASK_FLE_S 0xfe00707f
+#define MATCH_FLH 0x1007
+#define MASK_FLH 0x707f
+#define MATCH_FLT_D 0xa2001053
+#define MASK_FLT_D 0xfe00707f
+#define MATCH_FLT_H 0xb4000053
+#define MASK_FLT_H 0xfe00707f
+#define MATCH_FLT_S 0xa0001053
+#define MASK_FLT_S 0xfe00707f
+#define MATCH_FLW 0x2007
+#define MASK_FLW 0x707f
+#define MATCH_FMADD_D 0x2000043
+#define MASK_FMADD_D 0x600007f
+#define MATCH_FMADD_H 0x4000043
+#define MASK_FMADD_H 0x600007f
+#define MATCH_FMADD_S 0x43
+#define MASK_FMADD_S 0x600007f
+#define MATCH_FMAX_D 0x2a001053
+#define MASK_FMAX_D 0xfe00707f
+#define MATCH_FMAX_H 0xcc000053
+#define MASK_FMAX_H 0xfe00707f
+#define MATCH_FMAX_S 0x28001053
+#define MASK_FMAX_S 0xfe00707f
+#define MATCH_FMIN_D 0x2a000053
+#define MASK_FMIN_D 0xfe00707f
+#define MATCH_FMIN_H 0xc4000053
+#define MASK_FMIN_H 0xfe00707f
+#define MATCH_FMIN_S 0x28000053
+#define MASK_FMIN_S 0xfe00707f
+#define MATCH_FMOVN 0x6007077
+#define MASK_FMOVN 0xfe00707f
+#define MATCH_FMOVZ 0x4007077
+#define MASK_FMOVZ 0xfe00707f
+#define MATCH_FMSUB_D 0x2000047
+#define MASK_FMSUB_D 0x600007f
+#define MATCH_FMSUB_H 0x4000047
+#define MASK_FMSUB_H 0x600007f
+#define MATCH_FMSUB_S 0x47
+#define MASK_FMSUB_S 0x600007f
+#define MATCH_FMUL_D 0x12000053
+#define MASK_FMUL_D 0xfe00007f
+#define MATCH_FMUL_H 0x14000053
+#define MASK_FMUL_H 0xfe00007f
+#define MATCH_FMUL_S 0x10000053
+#define MASK_FMUL_S 0xfe00007f
+#define MATCH_FMV_D_X 0xf2000053
+#define MASK_FMV_D_X 0xfff0707f
+#define MATCH_FMV_H_X 0xf4000053
+#define MASK_FMV_H_X 0xfff0707f
+#define MATCH_FMV_S_X 0xf0000053
+#define MASK_FMV_S_X 0xfff0707f
+#define MATCH_FMV_X_D 0xe2000053
+#define MASK_FMV_X_D 0xfff0707f
+#define MATCH_FMV_X_H 0xe4000053
+#define MASK_FMV_X_H 0xfff0707f
+#define MATCH_FMV_X_S 0xe0000053
+#define MASK_FMV_X_S 0xfff0707f
+#define MATCH_FNMADD_D 0x200004f
+#define MASK_FNMADD_D 0x600007f
+#define MATCH_FNMADD_H 0x400004f
+#define MASK_FNMADD_H 0x600007f
+#define MATCH_FNMADD_S 0x4f
+#define MASK_FNMADD_S 0x600007f
+#define MATCH_FNMSUB_D 0x200004b
+#define MASK_FNMSUB_D 0x600007f
+#define MATCH_FNMSUB_H 0x400004b
+#define MASK_FNMSUB_H 0x600007f
+#define MATCH_FNMSUB_S 0x4b
+#define MASK_FNMSUB_S 0x600007f
+#define MATCH_FRCSR 0x302073
+#define MASK_FRCSR 0xfffff07f
+#define MATCH_FRFLAGS 0x102073
+#define MASK_FRFLAGS 0xfffff07f
+#define MATCH_FRRM 0x202073
+#define MASK_FRRM 0xfffff07f
+#define MATCH_FSCSR 0x301073
+#define MASK_FSCSR 0xfff0707f
+#define MATCH_FSD 0x3027
+#define MASK_FSD 0x707f
+#define MATCH_FSFLAGS 0x101073
+#define MASK_FSFLAGS 0xfff0707f
+#define MATCH_FSFLAGSI 0x105073
+#define MASK_FSFLAGSI 0xfff0707f
+#define MATCH_FSGNJ_D 0x22000053
+#define MASK_FSGNJ_D 0xfe00707f
+#define MATCH_FSGNJ_H 0x2c000053
+#define MASK_FSGNJ_H 0xfe00707f
+#define MATCH_FSGNJ_S 0x20000053
+#define MASK_FSGNJ_S 0xfe00707f
+#define MATCH_FSGNJN_D 0x22001053
+#define MASK_FSGNJN_D 0xfe00707f
+#define MATCH_FSGNJN_H 0x34000053
+#define MASK_FSGNJN_H 0xfe00707f
+#define MATCH_FSGNJN_S 0x20001053
+#define MASK_FSGNJN_S 0xfe00707f
+#define MATCH_FSGNJX_D 0x22002053
+#define MASK_FSGNJX_D 0xfe00707f
+#define MATCH_FSGNJX_H 0x3c000053
+#define MASK_FSGNJX_H 0xfe00707f
+#define MATCH_FSGNJX_S 0x20002053
+#define MASK_FSGNJX_S 0xfe00707f
+#define MATCH_FSH 0x1027
+#define MASK_FSH 0x707f
+#define MATCH_FSQRT_D 0x5a000053
+#define MASK_FSQRT_D 0xfff0007f
+#define MATCH_FSQRT_H 0x24000053
+#define MASK_FSQRT_H 0xfff0007f
+#define MATCH_FSQRT_S 0x58000053
+#define MASK_FSQRT_S 0xfff0007f
+#define MATCH_FSRM 0x201073
+#define MASK_FSRM 0xfff0707f
+#define MATCH_FSRMI 0x205073
+#define MASK_FSRMI 0xfff0707f
+#define MATCH_FSUB_D 0xa000053
+#define MASK_FSUB_D 0xfe00007f
+#define MATCH_FSUB_H 0xc000053
+#define MASK_FSUB_H 0xfe00007f
+#define MATCH_FSUB_S 0x8000053
+#define MASK_FSUB_S 0xfe00007f
+#define MATCH_FSW 0x2027
+#define MASK_FSW 0x707f
+#define MATCH_JAL 0x6f
+#define MASK_JAL 0x7f
+#define MATCH_JALR 0x67
+#define MASK_JALR 0x707f
+#define MATCH_LB 0x3
+#define MASK_LB 0x707f
+#define MATCH_LBU 0x4003
+#define MASK_LBU 0x707f
+#define MATCH_LD 0x3003
+#define MASK_LD 0x707f
+#define MATCH_LH 0x1003
+#define MASK_LH 0x707f
+#define MATCH_LHU 0x5003
+#define MASK_LHU 0x707f
+#define MATCH_LR_D 0x1000302f
+#define MASK_LR_D 0xf9f0707f
+#define MATCH_LR_W 0x1000202f
+#define MASK_LR_W 0xf9f0707f
+#define MATCH_LUI 0x37
+#define MASK_LUI 0x7f
+#define MATCH_LW 0x2003
+#define MASK_LW 0x707f
+#define MATCH_LWU 0x6003
+#define MASK_LWU 0x707f
+#define MATCH_MOVN 0x2007077
+#define MASK_MOVN 0xfe00707f
+#define MATCH_MOVZ 0x7077
+#define MASK_MOVZ 0xfe00707f
+#define MATCH_MRTS 0x30500073
+#define MASK_MRTS 0xffffffff
+#define MATCH_MUL 0x2000033
+#define MASK_MUL 0xfe00707f
+#define MATCH_MULH 0x2001033
+#define MASK_MULH 0xfe00707f
+#define MATCH_MULHSU 0x2002033
+#define MASK_MULHSU 0xfe00707f
+#define MATCH_MULHU 0x2003033
+#define MASK_MULHU 0xfe00707f
+#define MATCH_MULW 0x200003b
+#define MASK_MULW 0xfe00707f
+#define MATCH_OR 0x6033
+#define MASK_OR 0xfe00707f
+#define MATCH_ORI 0x6013
+#define MASK_ORI 0x707f
+#define MATCH_RDCYCLE 0xc0002073
+#define MASK_RDCYCLE 0xfffff07f
+#define MATCH_RDCYCLEH 0xc8002073
+#define MASK_RDCYCLEH 0xfffff07f
+#define MATCH_RDINSTRET 0xc0202073
+#define MASK_RDINSTRET 0xfffff07f
+#define MATCH_RDINSTRETH 0xc8202073
+#define MASK_RDINSTRETH 0xfffff07f
+#define MATCH_RDTIME 0xc0102073
+#define MASK_RDTIME 0xfffff07f
+#define MATCH_RDTIMEH 0xc8102073
+#define MASK_RDTIMEH 0xfffff07f
+#define MATCH_REM 0x2006033
+#define MASK_REM 0xfe00707f
+#define MATCH_REMU 0x2007033
+#define MASK_REMU 0xfe00707f
+#define MATCH_REMUW 0x200703b
+#define MASK_REMUW 0xfe00707f
+#define MATCH_REMW 0x200603b
+#define MASK_REMW 0xfe00707f
+#define MATCH_SB 0x23
+#define MASK_SB 0x707f
+#define MATCH_SBREAK 0x100073
+#define MASK_SBREAK 0xffffffff
+#define MATCH_SC_D 0x1800302f
+#define MASK_SC_D 0xf800707f
+#define MATCH_SC_W 0x1800202f
+#define MASK_SC_W 0xf800707f
+#define MATCH_SCALL 0x73
+#define MASK_SCALL 0xffffffff
+#define MATCH_SD 0x3023
+#define MASK_SD 0x707f
+#define MATCH_SFENCE_VM 0x10100073
+#define MASK_SFENCE_VM 0xfff07fff
+#define MATCH_SH 0x1023
+#define MASK_SH 0x707f
+#define MATCH_SLL 0x1033
+#define MASK_SLL 0xfe00707f
+#define MATCH_SLLI 0x1013
+#define MASK_SLLI 0xfc00707f
+#define MATCH_SLLI_RV32 0x1013
+#define MASK_SLLI_RV32 0xfe00707f
+#define MATCH_SLLIW 0x101b
+#define MASK_SLLIW 0xfe00707f
+#define MATCH_SLLW 0x103b
+#define MASK_SLLW 0xfe00707f
+#define MATCH_SLT 0x2033
+#define MASK_SLT 0xfe00707f
+#define MATCH_SLTI 0x2013
+#define MASK_SLTI 0x707f
+#define MATCH_SLTIU 0x3013
+#define MASK_SLTIU 0x707f
+#define MATCH_SLTU 0x3033
+#define MASK_SLTU 0xfe00707f
+#define MATCH_SRA 0x40005033
+#define MASK_SRA 0xfe00707f
+#define MATCH_SRAI 0x40005013
+#define MASK_SRAI 0xfc00707f
+#define MATCH_SRAI_RV32 0x40005013
+#define MASK_SRAI_RV32 0xfe00707f
+#define MATCH_SRAIW 0x4000501b
+#define MASK_SRAIW 0xfe00707f
+#define MATCH_SRAW 0x4000503b
+#define MASK_SRAW 0xfe00707f
+#define MATCH_SRET 0x10000073
+#define MASK_SRET 0xffffffff
+#define MATCH_SRL 0x5033
+#define MASK_SRL 0xfe00707f
+#define MATCH_SRLI 0x5013
+#define MASK_SRLI 0xfc00707f
+#define MATCH_SRLI_RV32 0x5013
+#define MASK_SRLI_RV32 0xfe00707f
+#define MATCH_SRLIW 0x501b
+#define MASK_SRLIW 0xfe00707f
+#define MATCH_SRLW 0x503b
+#define MASK_SRLW 0xfe00707f
+#define MATCH_STOP 0x5077
+#define MASK_STOP 0xffffffff
+#define MATCH_SUB 0x40000033
+#define MASK_SUB 0xfe00707f
+#define MATCH_SUBW 0x4000003b
+#define MASK_SUBW 0xfe00707f
+#define MATCH_SW 0x2023
+#define MASK_SW 0x707f
+#define MATCH_UTIDX 0x6077
+#define MASK_UTIDX 0xfffff07f
+#define MATCH_VENQCMD 0xa00302b
+#define MASK_VENQCMD 0xfe007fff
+#define MATCH_VENQCNT 0x1000302b
+#define MASK_VENQCNT 0xfe007fff
+#define MATCH_VENQIMM1 0xc00302b
+#define MASK_VENQIMM1 0xfe007fff
+#define MATCH_VENQIMM2 0xe00302b
+#define MASK_VENQIMM2 0xfe007fff
+#define MATCH_VF 0x10202b
+#define MASK_VF 0x1f0707f
+#define MATCH_VFLD 0x1600205b
+#define MASK_VFLD 0xfff0707f
+#define MATCH_VFLSEGD 0x1600205b
+#define MASK_VFLSEGD 0x1ff0707f
+#define MATCH_VFLSEGSTD 0x1600305b
+#define MASK_VFLSEGSTD 0x1e00707f
+#define MATCH_VFLSEGSTW 0x1400305b
+#define MASK_VFLSEGSTW 0x1e00707f
+#define MATCH_VFLSEGW 0x1400205b
+#define MASK_VFLSEGW 0x1ff0707f
+#define MATCH_VFLSTD 0x1600305b
+#define MASK_VFLSTD 0xfe00707f
+#define MATCH_VFLSTW 0x1400305b
+#define MASK_VFLSTW 0xfe00707f
+#define MATCH_VFLW 0x1400205b
+#define MASK_VFLW 0xfff0707f
+#define MATCH_VFMSV_D 0x1200202b
+#define MASK_VFMSV_D 0xfff0707f
+#define MATCH_VFMSV_S 0x1000202b
+#define MASK_VFMSV_S 0xfff0707f
+#define MATCH_VFMVV 0x1000002b
+#define MASK_VFMVV 0xfff0707f
+#define MATCH_VFSD 0x1600207b
+#define MASK_VFSD 0xfff0707f
+#define MATCH_VFSSEGD 0x1600207b
+#define MASK_VFSSEGD 0x1ff0707f
+#define MATCH_VFSSEGSTD 0x1600307b
+#define MASK_VFSSEGSTD 0x1e00707f
+#define MATCH_VFSSEGSTW 0x1400307b
+#define MASK_VFSSEGSTW 0x1e00707f
+#define MATCH_VFSSEGW 0x1400207b
+#define MASK_VFSSEGW 0x1ff0707f
+#define MATCH_VFSSTD 0x1600307b
+#define MASK_VFSSTD 0xfe00707f
+#define MATCH_VFSSTW 0x1400307b
+#define MASK_VFSSTW 0xfe00707f
+#define MATCH_VFSW 0x1400207b
+#define MASK_VFSW 0xfff0707f
+#define MATCH_VGETCFG 0x400b
+#define MASK_VGETCFG 0xfffff07f
+#define MATCH_VGETVL 0x200400b
+#define MASK_VGETVL 0xfffff07f
+#define MATCH_VLB 0x205b
+#define MASK_VLB 0xfff0707f
+#define MATCH_VLBU 0x800205b
+#define MASK_VLBU 0xfff0707f
+#define MATCH_VLD 0x600205b
+#define MASK_VLD 0xfff0707f
+#define MATCH_VLH 0x200205b
+#define MASK_VLH 0xfff0707f
+#define MATCH_VLHU 0xa00205b
+#define MASK_VLHU 0xfff0707f
+#define MATCH_VLSEGB 0x205b
+#define MASK_VLSEGB 0x1ff0707f
+#define MATCH_VLSEGBU 0x800205b
+#define MASK_VLSEGBU 0x1ff0707f
+#define MATCH_VLSEGD 0x600205b
+#define MASK_VLSEGD 0x1ff0707f
+#define MATCH_VLSEGH 0x200205b
+#define MASK_VLSEGH 0x1ff0707f
+#define MATCH_VLSEGHU 0xa00205b
+#define MASK_VLSEGHU 0x1ff0707f
+#define MATCH_VLSEGSTB 0x305b
+#define MASK_VLSEGSTB 0x1e00707f
+#define MATCH_VLSEGSTBU 0x800305b
+#define MASK_VLSEGSTBU 0x1e00707f
+#define MATCH_VLSEGSTD 0x600305b
+#define MASK_VLSEGSTD 0x1e00707f
+#define MATCH_VLSEGSTH 0x200305b
+#define MASK_VLSEGSTH 0x1e00707f
+#define MATCH_VLSEGSTHU 0xa00305b
+#define MASK_VLSEGSTHU 0x1e00707f
+#define MATCH_VLSEGSTW 0x400305b
+#define MASK_VLSEGSTW 0x1e00707f
+#define MATCH_VLSEGSTWU 0xc00305b
+#define MASK_VLSEGSTWU 0x1e00707f
+#define MATCH_VLSEGW 0x400205b
+#define MASK_VLSEGW 0x1ff0707f
+#define MATCH_VLSEGWU 0xc00205b
+#define MASK_VLSEGWU 0x1ff0707f
+#define MATCH_VLSTB 0x305b
+#define MASK_VLSTB 0xfe00707f
+#define MATCH_VLSTBU 0x800305b
+#define MASK_VLSTBU 0xfe00707f
+#define MATCH_VLSTD 0x600305b
+#define MASK_VLSTD 0xfe00707f
+#define MATCH_VLSTH 0x200305b
+#define MASK_VLSTH 0xfe00707f
+#define MATCH_VLSTHU 0xa00305b
+#define MASK_VLSTHU 0xfe00707f
+#define MATCH_VLSTW 0x400305b
+#define MASK_VLSTW 0xfe00707f
+#define MATCH_VLSTWU 0xc00305b
+#define MASK_VLSTWU 0xfe00707f
+#define MATCH_VLW 0x400205b
+#define MASK_VLW 0xfff0707f
+#define MATCH_VLWU 0xc00205b
+#define MASK_VLWU 0xfff0707f
+#define MATCH_VMSV 0x200202b
+#define MASK_VMSV 0xfff0707f
+#define MATCH_VMVV 0x200002b
+#define MASK_VMVV 0xfff0707f
+#define MATCH_VSB 0x207b
+#define MASK_VSB 0xfff0707f
+#define MATCH_VSD 0x600207b
+#define MASK_VSD 0xfff0707f
+#define MATCH_VSETCFG 0x200b
+#define MASK_VSETCFG 0x7fff
+#define MATCH_VSETVL 0x600b
+#define MASK_VSETVL 0xfff0707f
+#define MATCH_VSH 0x200207b
+#define MASK_VSH 0xfff0707f
+#define MATCH_VSSEGB 0x207b
+#define MASK_VSSEGB 0x1ff0707f
+#define MATCH_VSSEGD 0x600207b
+#define MASK_VSSEGD 0x1ff0707f
+#define MATCH_VSSEGH 0x200207b
+#define MASK_VSSEGH 0x1ff0707f
+#define MATCH_VSSEGSTB 0x307b
+#define MASK_VSSEGSTB 0x1e00707f
+#define MATCH_VSSEGSTD 0x600307b
+#define MASK_VSSEGSTD 0x1e00707f
+#define MATCH_VSSEGSTH 0x200307b
+#define MASK_VSSEGSTH 0x1e00707f
+#define MATCH_VSSEGSTW 0x400307b
+#define MASK_VSSEGSTW 0x1e00707f
+#define MATCH_VSSEGW 0x400207b
+#define MASK_VSSEGW 0x1ff0707f
+#define MATCH_VSSTB 0x307b
+#define MASK_VSSTB 0xfe00707f
+#define MATCH_VSSTD 0x600307b
+#define MASK_VSSTD 0xfe00707f
+#define MATCH_VSSTH 0x200307b
+#define MASK_VSSTH 0xfe00707f
+#define MATCH_VSSTW 0x400307b
+#define MASK_VSSTW 0xfe00707f
+#define MATCH_VSW 0x400207b
+#define MASK_VSW 0xfff0707f
+#define MATCH_VXCPTAUX 0x200402b
+#define MASK_VXCPTAUX 0xfffff07f
+#define MATCH_VXCPTCAUSE 0x402b
+#define MASK_VXCPTCAUSE 0xfffff07f
+#define MATCH_VXCPTEVAC 0x600302b
+#define MASK_VXCPTEVAC 0xfff07fff
+#define MATCH_VXCPTHOLD 0x800302b
+#define MASK_VXCPTHOLD 0xfff07fff
+#define MATCH_VXCPTKILL 0x400302b
+#define MASK_VXCPTKILL 0xffffffff
+#define MATCH_VXCPTRESTORE 0x200302b
+#define MASK_VXCPTRESTORE 0xfff07fff
+#define MATCH_VXCPTSAVE 0x302b
+#define MASK_VXCPTSAVE 0xfff07fff
+#define MATCH_XOR 0x4033
+#define MASK_XOR 0xfe00707f
+#define MATCH_XORI 0x4013
+#define MASK_XORI 0x707f
+#define CSR_FFLAGS 0x1
+#define CSR_FRM 0x2
+#define CSR_FCSR 0x3
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_STATS 0xc0
+#define CSR_UARCH0 0xcc0
+#define CSR_UARCH1 0xcc1
+#define CSR_UARCH2 0xcc2
+#define CSR_UARCH3 0xcc3
+#define CSR_UARCH4 0xcc4
+#define CSR_UARCH5 0xcc5
+#define CSR_UARCH6 0xcc6
+#define CSR_UARCH7 0xcc7
+#define CSR_UARCH8 0xcc8
+#define CSR_UARCH9 0xcc9
+#define CSR_UARCH10 0xcca
+#define CSR_UARCH11 0xccb
+#define CSR_UARCH12 0xccc
+#define CSR_UARCH13 0xccd
+#define CSR_UARCH14 0xcce
+#define CSR_UARCH15 0xccf
+#define CSR_SSTATUS 0x100
+#define CSR_STVEC 0x101
+#define CSR_STIMECMP 0x121
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SPTBR 0x188
+#define CSR_SASID 0x189
+#define CSR_SCYCLE 0x900
+#define CSR_STIME 0x901
+#define CSR_SINSTRET 0x902
+#define CSR_SCAUSE 0xd40
+#define CSR_SBADADDR 0xd41
+#define CSR_MSTATUS 0x300
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MBADADDR 0x343
+#define CSR_RESET 0x780
+#define CSR_TOHOST 0x781
+#define CSR_FROMHOST 0x782
+#define CSR_SEND_IPI 0x783
+#define CSR_HARTID 0xfc0
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
+#define CSR_SCYCLEH 0x980
+#define CSR_STIMEH 0x981
+#define CSR_SINSTRETH 0x982
+#define CAUSE_MISALIGNED_FETCH 0x0
+#define CAUSE_FAULT_FETCH 0x1
+#define CAUSE_ILLEGAL_INSTRUCTION 0x2
+#define CAUSE_MISALIGNED_LOAD 0x4
+#define CAUSE_FAULT_LOAD 0x5
+#define CAUSE_MISALIGNED_STORE 0x6
+#define CAUSE_FAULT_STORE 0x7
+#define CAUSE_ECALL 0x8
+#define CAUSE_BREAKPOINT 0x9
+#endif
+#ifdef DECLARE_INSN
+DECLARE_INSN(add, MATCH_ADD, MASK_ADD)
+DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
+DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
+DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW)
+DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
+DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W)
+DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
+DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W)
+DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
+DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W)
+DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
+DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W)
+DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D)
+DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
+DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D)
+DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
+DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
+DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
+DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
+DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
+DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
+DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
+DECLARE_INSN(and, MATCH_AND, MASK_AND)
+DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
+DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
+DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
+DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
+DECLARE_INSN(c_addi4, MATCH_C_ADDI4, MASK_C_ADDI4)
+DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
+DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW)
+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
+DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
+DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
+DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI)
+DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI)
+DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
+DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP)
+DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV)
+DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
+DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
+DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
+DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
+DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
+DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
+DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
+DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
+DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
+DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
+DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
+DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
+DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD)
+DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1)
+DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2)
+DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
+DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
+DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1)
+DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD)
+DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1)
+DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2)
+DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1)
+DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2)
+DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2)
+DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD)
+DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1)
+DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2)
+DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1)
+DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2)
+DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3)
+DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
+DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
+DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
+DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1)
+DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
+DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
+DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
+DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
+DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
+DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
+DECLARE_INSN(fadd_h, MATCH_FADD_H, MASK_FADD_H)
+DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
+DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
+DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
+DECLARE_INSN(fcvt_d_h, MATCH_FCVT_D_H, MASK_FCVT_D_H)
+DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
+DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
+DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
+DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
+DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
+DECLARE_INSN(fcvt_h_d, MATCH_FCVT_H_D, MASK_FCVT_H_D)
+DECLARE_INSN(fcvt_h_l, MATCH_FCVT_H_L, MASK_FCVT_H_L)
+DECLARE_INSN(fcvt_h_lu, MATCH_FCVT_H_LU, MASK_FCVT_H_LU)
+DECLARE_INSN(fcvt_h_s, MATCH_FCVT_H_S, MASK_FCVT_H_S)
+DECLARE_INSN(fcvt_h_w, MATCH_FCVT_H_W, MASK_FCVT_H_W)
+DECLARE_INSN(fcvt_h_wu, MATCH_FCVT_H_WU, MASK_FCVT_H_WU)
+DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
+DECLARE_INSN(fcvt_l_h, MATCH_FCVT_L_H, MASK_FCVT_L_H)
+DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
+DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
+DECLARE_INSN(fcvt_lu_h, MATCH_FCVT_LU_H, MASK_FCVT_LU_H)
+DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
+DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
+DECLARE_INSN(fcvt_s_h, MATCH_FCVT_S_H, MASK_FCVT_S_H)
+DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
+DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
+DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
+DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
+DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
+DECLARE_INSN(fcvt_w_h, MATCH_FCVT_W_H, MASK_FCVT_W_H)
+DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
+DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
+DECLARE_INSN(fcvt_wu_h, MATCH_FCVT_WU_H, MASK_FCVT_WU_H)
+DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
+DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
+DECLARE_INSN(fdiv_h, MATCH_FDIV_H, MASK_FDIV_H)
+DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
+DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
+DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
+DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
+DECLARE_INSN(feq_h, MATCH_FEQ_H, MASK_FEQ_H)
+DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
+DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
+DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
+DECLARE_INSN(fle_h, MATCH_FLE_H, MASK_FLE_H)
+DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
+DECLARE_INSN(flh, MATCH_FLH, MASK_FLH)
+DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
+DECLARE_INSN(flt_h, MATCH_FLT_H, MASK_FLT_H)
+DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
+DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
+DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
+DECLARE_INSN(fmadd_h, MATCH_FMADD_H, MASK_FMADD_H)
+DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
+DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
+DECLARE_INSN(fmax_h, MATCH_FMAX_H, MASK_FMAX_H)
+DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
+DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
+DECLARE_INSN(fmin_h, MATCH_FMIN_H, MASK_FMIN_H)
+DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
+DECLARE_INSN(fmovn, MATCH_FMOVN, MASK_FMOVN)
+DECLARE_INSN(fmovz, MATCH_FMOVZ, MASK_FMOVZ)
+DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
+DECLARE_INSN(fmsub_h, MATCH_FMSUB_H, MASK_FMSUB_H)
+DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
+DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
+DECLARE_INSN(fmul_h, MATCH_FMUL_H, MASK_FMUL_H)
+DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
+DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
+DECLARE_INSN(fmv_h_x, MATCH_FMV_H_X, MASK_FMV_H_X)
+DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X)
+DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
+DECLARE_INSN(fmv_x_h, MATCH_FMV_X_H, MASK_FMV_X_H)
+DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S)
+DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
+DECLARE_INSN(fnmadd_h, MATCH_FNMADD_H, MASK_FNMADD_H)
+DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
+DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
+DECLARE_INSN(fnmsub_h, MATCH_FNMSUB_H, MASK_FNMSUB_H)
+DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
+DECLARE_INSN(frcsr, MATCH_FRCSR, MASK_FRCSR)
+DECLARE_INSN(frflags, MATCH_FRFLAGS, MASK_FRFLAGS)
+DECLARE_INSN(frrm, MATCH_FRRM, MASK_FRRM)
+DECLARE_INSN(fscsr, MATCH_FSCSR, MASK_FSCSR)
+DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
+DECLARE_INSN(fsflags, MATCH_FSFLAGS, MASK_FSFLAGS)
+DECLARE_INSN(fsflagsi, MATCH_FSFLAGSI, MASK_FSFLAGSI)
+DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
+DECLARE_INSN(fsgnj_h, MATCH_FSGNJ_H, MASK_FSGNJ_H)
+DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
+DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
+DECLARE_INSN(fsgnjn_h, MATCH_FSGNJN_H, MASK_FSGNJN_H)
+DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
+DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
+DECLARE_INSN(fsgnjx_h, MATCH_FSGNJX_H, MASK_FSGNJX_H)
+DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
+DECLARE_INSN(fsh, MATCH_FSH, MASK_FSH)
+DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
+DECLARE_INSN(fsqrt_h, MATCH_FSQRT_H, MASK_FSQRT_H)
+DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
+DECLARE_INSN(fsrm, MATCH_FSRM, MASK_FSRM)
+DECLARE_INSN(fsrmi, MATCH_FSRMI, MASK_FSRMI)
+DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
+DECLARE_INSN(fsub_h, MATCH_FSUB_H, MASK_FSUB_H)
+DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
+DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
+DECLARE_INSN(lb, MATCH_LB, MASK_LB)
+DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
+DECLARE_INSN(ld, MATCH_LD, MASK_LD)
+DECLARE_INSN(lh, MATCH_LH, MASK_LH)
+DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
+DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
+DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
+DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
+DECLARE_INSN(lw, MATCH_LW, MASK_LW)
+DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
+DECLARE_INSN(movn, MATCH_MOVN, MASK_MOVN)
+DECLARE_INSN(movz, MATCH_MOVZ, MASK_MOVZ)
+DECLARE_INSN(mrts, MATCH_MRTS, MASK_MRTS)
+DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
+DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
+DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
+DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
+DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
+DECLARE_INSN(or, MATCH_OR, MASK_OR)
+DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
+DECLARE_INSN(rdcycle, MATCH_RDCYCLE, MASK_RDCYCLE)
+DECLARE_INSN(rdcycleh, MATCH_RDCYCLEH, MASK_RDCYCLEH)
+DECLARE_INSN(rdinstret, MATCH_RDINSTRET, MASK_RDINSTRET)
+DECLARE_INSN(rdinstreth, MATCH_RDINSTRETH, MASK_RDINSTRETH)
+DECLARE_INSN(rdtime, MATCH_RDTIME, MASK_RDTIME)
+DECLARE_INSN(rdtimeh, MATCH_RDTIMEH, MASK_RDTIMEH)
+DECLARE_INSN(rem, MATCH_REM, MASK_REM)
+DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
+DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
+DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
+DECLARE_INSN(sb, MATCH_SB, MASK_SB)
+DECLARE_INSN(sbreak, MATCH_SBREAK, MASK_SBREAK)
+DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
+DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
+DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL)
+DECLARE_INSN(sd, MATCH_SD, MASK_SD)
+DECLARE_INSN(sfence_vm, MATCH_SFENCE_VM, MASK_SFENCE_VM)
+DECLARE_INSN(sh, MATCH_SH, MASK_SH)
+DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
+DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
+DECLARE_INSN(slli_rv32, MATCH_SLLI_RV32, MASK_SLLI_RV32)
+DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
+DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
+DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
+DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
+DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
+DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
+DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
+DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
+DECLARE_INSN(srai_rv32, MATCH_SRAI_RV32, MASK_SRAI_RV32)
+DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
+DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
+DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
+DECLARE_INSN(srli_rv32, MATCH_SRLI_RV32, MASK_SRLI_RV32)
+DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
+DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
+DECLARE_INSN(stop, MATCH_STOP, MASK_STOP)
+DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
+DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
+DECLARE_INSN(sw, MATCH_SW, MASK_SW)
+DECLARE_INSN(utidx, MATCH_UTIDX, MASK_UTIDX)
+DECLARE_INSN(venqcmd, MATCH_VENQCMD, MASK_VENQCMD)
+DECLARE_INSN(venqcnt, MATCH_VENQCNT, MASK_VENQCNT)
+DECLARE_INSN(venqimm1, MATCH_VENQIMM1, MASK_VENQIMM1)
+DECLARE_INSN(venqimm2, MATCH_VENQIMM2, MASK_VENQIMM2)
+DECLARE_INSN(vf, MATCH_VF, MASK_VF)
+DECLARE_INSN(vfld, MATCH_VFLD, MASK_VFLD)
+DECLARE_INSN(vflsegd, MATCH_VFLSEGD, MASK_VFLSEGD)
+DECLARE_INSN(vflsegstd, MATCH_VFLSEGSTD, MASK_VFLSEGSTD)
+DECLARE_INSN(vflsegstw, MATCH_VFLSEGSTW, MASK_VFLSEGSTW)
+DECLARE_INSN(vflsegw, MATCH_VFLSEGW, MASK_VFLSEGW)
+DECLARE_INSN(vflstd, MATCH_VFLSTD, MASK_VFLSTD)
+DECLARE_INSN(vflstw, MATCH_VFLSTW, MASK_VFLSTW)
+DECLARE_INSN(vflw, MATCH_VFLW, MASK_VFLW)
+DECLARE_INSN(vfmsv_d, MATCH_VFMSV_D, MASK_VFMSV_D)
+DECLARE_INSN(vfmsv_s, MATCH_VFMSV_S, MASK_VFMSV_S)
+DECLARE_INSN(vfmvv, MATCH_VFMVV, MASK_VFMVV)
+DECLARE_INSN(vfsd, MATCH_VFSD, MASK_VFSD)
+DECLARE_INSN(vfssegd, MATCH_VFSSEGD, MASK_VFSSEGD)
+DECLARE_INSN(vfssegstd, MATCH_VFSSEGSTD, MASK_VFSSEGSTD)
+DECLARE_INSN(vfssegstw, MATCH_VFSSEGSTW, MASK_VFSSEGSTW)
+DECLARE_INSN(vfssegw, MATCH_VFSSEGW, MASK_VFSSEGW)
+DECLARE_INSN(vfsstd, MATCH_VFSSTD, MASK_VFSSTD)
+DECLARE_INSN(vfsstw, MATCH_VFSSTW, MASK_VFSSTW)
+DECLARE_INSN(vfsw, MATCH_VFSW, MASK_VFSW)
+DECLARE_INSN(vgetcfg, MATCH_VGETCFG, MASK_VGETCFG)
+DECLARE_INSN(vgetvl, MATCH_VGETVL, MASK_VGETVL)
+DECLARE_INSN(vlb, MATCH_VLB, MASK_VLB)
+DECLARE_INSN(vlbu, MATCH_VLBU, MASK_VLBU)
+DECLARE_INSN(vld, MATCH_VLD, MASK_VLD)
+DECLARE_INSN(vlh, MATCH_VLH, MASK_VLH)
+DECLARE_INSN(vlhu, MATCH_VLHU, MASK_VLHU)
+DECLARE_INSN(vlsegb, MATCH_VLSEGB, MASK_VLSEGB)
+DECLARE_INSN(vlsegbu, MATCH_VLSEGBU, MASK_VLSEGBU)
+DECLARE_INSN(vlsegd, MATCH_VLSEGD, MASK_VLSEGD)
+DECLARE_INSN(vlsegh, MATCH_VLSEGH, MASK_VLSEGH)
+DECLARE_INSN(vlseghu, MATCH_VLSEGHU, MASK_VLSEGHU)
+DECLARE_INSN(vlsegstb, MATCH_VLSEGSTB, MASK_VLSEGSTB)
+DECLARE_INSN(vlsegstbu, MATCH_VLSEGSTBU, MASK_VLSEGSTBU)
+DECLARE_INSN(vlsegstd, MATCH_VLSEGSTD, MASK_VLSEGSTD)
+DECLARE_INSN(vlsegsth, MATCH_VLSEGSTH, MASK_VLSEGSTH)
+DECLARE_INSN(vlsegsthu, MATCH_VLSEGSTHU, MASK_VLSEGSTHU)
+DECLARE_INSN(vlsegstw, MATCH_VLSEGSTW, MASK_VLSEGSTW)
+DECLARE_INSN(vlsegstwu, MATCH_VLSEGSTWU, MASK_VLSEGSTWU)
+DECLARE_INSN(vlsegw, MATCH_VLSEGW, MASK_VLSEGW)
+DECLARE_INSN(vlsegwu, MATCH_VLSEGWU, MASK_VLSEGWU)
+DECLARE_INSN(vlstb, MATCH_VLSTB, MASK_VLSTB)
+DECLARE_INSN(vlstbu, MATCH_VLSTBU, MASK_VLSTBU)
+DECLARE_INSN(vlstd, MATCH_VLSTD, MASK_VLSTD)
+DECLARE_INSN(vlsth, MATCH_VLSTH, MASK_VLSTH)
+DECLARE_INSN(vlsthu, MATCH_VLSTHU, MASK_VLSTHU)
+DECLARE_INSN(vlstw, MATCH_VLSTW, MASK_VLSTW)
+DECLARE_INSN(vlstwu, MATCH_VLSTWU, MASK_VLSTWU)
+DECLARE_INSN(vlw, MATCH_VLW, MASK_VLW)
+DECLARE_INSN(vlwu, MATCH_VLWU, MASK_VLWU)
+DECLARE_INSN(vmsv, MATCH_VMSV, MASK_VMSV)
+DECLARE_INSN(vmvv, MATCH_VMVV, MASK_VMVV)
+DECLARE_INSN(vsb, MATCH_VSB, MASK_VSB)
+DECLARE_INSN(vsd, MATCH_VSD, MASK_VSD)
+DECLARE_INSN(vsetcfg, MATCH_VSETCFG, MASK_VSETCFG)
+DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL)
+DECLARE_INSN(vsh, MATCH_VSH, MASK_VSH)
+DECLARE_INSN(vssegb, MATCH_VSSEGB, MASK_VSSEGB)
+DECLARE_INSN(vssegd, MATCH_VSSEGD, MASK_VSSEGD)
+DECLARE_INSN(vssegh, MATCH_VSSEGH, MASK_VSSEGH)
+DECLARE_INSN(vssegstb, MATCH_VSSEGSTB, MASK_VSSEGSTB)
+DECLARE_INSN(vssegstd, MATCH_VSSEGSTD, MASK_VSSEGSTD)
+DECLARE_INSN(vssegsth, MATCH_VSSEGSTH, MASK_VSSEGSTH)
+DECLARE_INSN(vssegstw, MATCH_VSSEGSTW, MASK_VSSEGSTW)
+DECLARE_INSN(vssegw, MATCH_VSSEGW, MASK_VSSEGW)
+DECLARE_INSN(vsstb, MATCH_VSSTB, MASK_VSSTB)
+DECLARE_INSN(vsstd, MATCH_VSSTD, MASK_VSSTD)
+DECLARE_INSN(vssth, MATCH_VSSTH, MASK_VSSTH)
+DECLARE_INSN(vsstw, MATCH_VSSTW, MASK_VSSTW)
+DECLARE_INSN(vsw, MATCH_VSW, MASK_VSW)
+DECLARE_INSN(vxcptaux, MATCH_VXCPTAUX, MASK_VXCPTAUX)
+DECLARE_INSN(vxcptcause, MATCH_VXCPTCAUSE, MASK_VXCPTCAUSE)
+DECLARE_INSN(vxcptevac, MATCH_VXCPTEVAC, MASK_VXCPTEVAC)
+DECLARE_INSN(vxcpthold, MATCH_VXCPTHOLD, MASK_VXCPTHOLD)
+DECLARE_INSN(vxcptkill, MATCH_VXCPTKILL, MASK_VXCPTKILL)
+DECLARE_INSN(vxcptrestore, MATCH_VXCPTRESTORE, MASK_VXCPTRESTORE)
+DECLARE_INSN(vxcptsave, MATCH_VXCPTSAVE, MASK_VXCPTSAVE)
+DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
+DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
+#endif
+#ifdef DECLARE_CSR
+DECLARE_CSR(fflags, CSR_FFLAGS)
+DECLARE_CSR(frm, CSR_FRM)
+DECLARE_CSR(fcsr, CSR_FCSR)
+DECLARE_CSR(cycle, CSR_CYCLE)
+DECLARE_CSR(time, CSR_TIME)
+DECLARE_CSR(instret, CSR_INSTRET)
+DECLARE_CSR(stats, CSR_STATS)
+DECLARE_CSR(uarch0, CSR_UARCH0)
+DECLARE_CSR(uarch1, CSR_UARCH1)
+DECLARE_CSR(uarch2, CSR_UARCH2)
+DECLARE_CSR(uarch3, CSR_UARCH3)
+DECLARE_CSR(uarch4, CSR_UARCH4)
+DECLARE_CSR(uarch5, CSR_UARCH5)
+DECLARE_CSR(uarch6, CSR_UARCH6)
+DECLARE_CSR(uarch7, CSR_UARCH7)
+DECLARE_CSR(uarch8, CSR_UARCH8)
+DECLARE_CSR(uarch9, CSR_UARCH9)
+DECLARE_CSR(uarch10, CSR_UARCH10)
+DECLARE_CSR(uarch11, CSR_UARCH11)
+DECLARE_CSR(uarch12, CSR_UARCH12)
+DECLARE_CSR(uarch13, CSR_UARCH13)
+DECLARE_CSR(uarch14, CSR_UARCH14)
+DECLARE_CSR(uarch15, CSR_UARCH15)
+DECLARE_CSR(sstatus, CSR_SSTATUS)
+DECLARE_CSR(stvec, CSR_STVEC)
+DECLARE_CSR(stimecmp, CSR_STIMECMP)
+DECLARE_CSR(sscratch, CSR_SSCRATCH)
+DECLARE_CSR(sepc, CSR_SEPC)
+DECLARE_CSR(sptbr, CSR_SPTBR)
+DECLARE_CSR(sasid, CSR_SASID)
+DECLARE_CSR(scycle, CSR_SCYCLE)
+DECLARE_CSR(stime, CSR_STIME)
+DECLARE_CSR(sinstret, CSR_SINSTRET)
+DECLARE_CSR(scause, CSR_SCAUSE)
+DECLARE_CSR(sbadaddr, CSR_SBADADDR)
+DECLARE_CSR(mstatus, CSR_MSTATUS)
+DECLARE_CSR(mscratch, CSR_MSCRATCH)
+DECLARE_CSR(mepc, CSR_MEPC)
+DECLARE_CSR(mcause, CSR_MCAUSE)
+DECLARE_CSR(mbadaddr, CSR_MBADADDR)
+DECLARE_CSR(reset, CSR_RESET)
+DECLARE_CSR(tohost, CSR_TOHOST)
+DECLARE_CSR(fromhost, CSR_FROMHOST)
+DECLARE_CSR(send_ipi, CSR_SEND_IPI)
+DECLARE_CSR(hartid, CSR_HARTID)
+DECLARE_CSR(cycleh, CSR_CYCLEH)
+DECLARE_CSR(timeh, CSR_TIMEH)
+DECLARE_CSR(instreth, CSR_INSTRETH)
+DECLARE_CSR(scycleh, CSR_SCYCLEH)
+DECLARE_CSR(stimeh, CSR_STIMEH)
+DECLARE_CSR(sinstreth, CSR_SINSTRETH)
+#endif
+#ifdef DECLARE_CAUSE
+DECLARE_CAUSE("fflags", CAUSE_FFLAGS)
+DECLARE_CAUSE("frm", CAUSE_FRM)
+DECLARE_CAUSE("fcsr", CAUSE_FCSR)
+DECLARE_CAUSE("cycle", CAUSE_CYCLE)
+DECLARE_CAUSE("time", CAUSE_TIME)
+DECLARE_CAUSE("instret", CAUSE_INSTRET)
+DECLARE_CAUSE("stats", CAUSE_STATS)
+DECLARE_CAUSE("uarch0", CAUSE_UARCH0)
+DECLARE_CAUSE("uarch1", CAUSE_UARCH1)
+DECLARE_CAUSE("uarch2", CAUSE_UARCH2)
+DECLARE_CAUSE("uarch3", CAUSE_UARCH3)
+DECLARE_CAUSE("uarch4", CAUSE_UARCH4)
+DECLARE_CAUSE("uarch5", CAUSE_UARCH5)
+DECLARE_CAUSE("uarch6", CAUSE_UARCH6)
+DECLARE_CAUSE("uarch7", CAUSE_UARCH7)
+DECLARE_CAUSE("uarch8", CAUSE_UARCH8)
+DECLARE_CAUSE("uarch9", CAUSE_UARCH9)
+DECLARE_CAUSE("uarch10", CAUSE_UARCH10)
+DECLARE_CAUSE("uarch11", CAUSE_UARCH11)
+DECLARE_CAUSE("uarch12", CAUSE_UARCH12)
+DECLARE_CAUSE("uarch13", CAUSE_UARCH13)
+DECLARE_CAUSE("uarch14", CAUSE_UARCH14)
+DECLARE_CAUSE("uarch15", CAUSE_UARCH15)
+DECLARE_CAUSE("sstatus", CAUSE_SSTATUS)
+DECLARE_CAUSE("stvec", CAUSE_STVEC)
+DECLARE_CAUSE("stimecmp", CAUSE_STIMECMP)
+DECLARE_CAUSE("sscratch", CAUSE_SSCRATCH)
+DECLARE_CAUSE("sepc", CAUSE_SEPC)
+DECLARE_CAUSE("sptbr", CAUSE_SPTBR)
+DECLARE_CAUSE("sasid", CAUSE_SASID)
+DECLARE_CAUSE("scycle", CAUSE_SCYCLE)
+DECLARE_CAUSE("stime", CAUSE_STIME)
+DECLARE_CAUSE("sinstret", CAUSE_SINSTRET)
+DECLARE_CAUSE("scause", CAUSE_SCAUSE)
+DECLARE_CAUSE("sbadaddr", CAUSE_SBADADDR)
+DECLARE_CAUSE("mstatus", CAUSE_MSTATUS)
+DECLARE_CAUSE("mscratch", CAUSE_MSCRATCH)
+DECLARE_CAUSE("mepc", CAUSE_MEPC)
+DECLARE_CAUSE("mcause", CAUSE_MCAUSE)
+DECLARE_CAUSE("mbadaddr", CAUSE_MBADADDR)
+DECLARE_CAUSE("reset", CAUSE_RESET)
+DECLARE_CAUSE("tohost", CAUSE_TOHOST)
+DECLARE_CAUSE("fromhost", CAUSE_FROMHOST)
+DECLARE_CAUSE("send_ipi", CAUSE_SEND_IPI)
+DECLARE_CAUSE("hartid", CAUSE_HARTID)
+DECLARE_CAUSE("cycleh", CAUSE_CYCLEH)
+DECLARE_CAUSE("timeh", CAUSE_TIMEH)
+DECLARE_CAUSE("instreth", CAUSE_INSTRETH)
+DECLARE_CAUSE("scycleh", CAUSE_SCYCLEH)
+DECLARE_CAUSE("stimeh", CAUSE_STIMEH)
+DECLARE_CAUSE("sinstreth", CAUSE_SINSTRETH)
+#endif
diff -rNU3 dist.orig/gcc/config/riscv/riscv-protos.h dist/gcc/config/riscv/riscv-protos.h
--- dist.orig/gcc/config/riscv/riscv-protos.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/riscv-protos.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,96 @@
+/* Definition of RISC-V target for GNU compiler.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+ Based on MIPS target for GNU compiler.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RISCV_PROTOS_H
+#define GCC_RISCV_PROTOS_H
+
+enum riscv_symbol_type {
+ SYMBOL_ABSOLUTE,
+ SYMBOL_GOT_DISP,
+ SYMBOL_TLS,
+ SYMBOL_TLS_LE,
+ SYMBOL_TLS_IE,
+ SYMBOL_TLS_GD
+};
+#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)
+
+enum riscv_code_model {
+ CM_MEDLOW,
+ CM_MEDANY,
+ CM_PIC
+};
+extern enum riscv_code_model riscv_cmodel;
+
+extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
+extern int riscv_regno_mode_ok_for_base_p (int, enum machine_mode, bool);
+extern int riscv_address_insns (rtx, enum machine_mode, bool);
+extern int riscv_const_insns (rtx);
+extern int riscv_split_const_insns (rtx);
+extern int riscv_load_store_insns (rtx, rtx);
+extern rtx riscv_emit_move (rtx, rtx);
+extern bool riscv_split_symbol (rtx, rtx, enum machine_mode, rtx *);
+extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
+extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT);
+extern bool riscv_legitimize_move (enum machine_mode, rtx, rtx);
+extern bool riscv_legitimize_vector_move (enum machine_mode, rtx, rtx);
+
+extern rtx riscv_subword (rtx, bool);
+extern bool riscv_split_64bit_move_p (rtx, rtx);
+extern void riscv_split_doubleword_move (rtx, rtx);
+extern const char *riscv_output_move (rtx, rtx);
+extern const char *riscv_riscv_output_vector_move (enum machine_mode, rtx, rtx);
+#ifdef RTX_CODE
+extern void riscv_expand_scc (rtx *);
+extern void riscv_expand_conditional_branch (rtx *);
+#endif
+extern rtx riscv_expand_call (bool, rtx, rtx, rtx);
+extern void riscv_expand_fcc_reload (rtx, rtx, rtx);
+extern void riscv_set_return_address (rtx, rtx);
+extern bool riscv_expand_block_move (rtx, rtx, rtx);
+extern void riscv_expand_synci_loop (rtx, rtx);
+
+extern bool riscv_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT,
+ HOST_WIDE_INT);
+extern bool riscv_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT,
+ HOST_WIDE_INT);
+extern void riscv_order_regs_for_local_alloc (void);
+
+extern rtx riscv_return_addr (int, rtx);
+extern HOST_WIDE_INT riscv_initial_elimination_offset (int, int);
+extern void riscv_expand_prologue (void);
+extern void riscv_expand_epilogue (bool);
+extern bool riscv_can_use_return_insn (void);
+extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
+
+extern enum reg_class riscv_secondary_reload_class (enum reg_class,
+ enum machine_mode,
+ rtx, bool);
+extern int riscv_class_max_nregs (enum reg_class, enum machine_mode);
+
+extern unsigned int riscv_hard_regno_nregs (int, enum machine_mode);
+
+extern void irix_asm_output_align (FILE *, unsigned);
+extern const char *current_section_name (void);
+extern unsigned int current_section_flags (void);
+
+extern void riscv_expand_vector_init (rtx, rtx);
+
+#endif /* ! GCC_RISCV_PROTOS_H */
diff -rNU3 dist.orig/gcc/config/riscv/riscv.c dist/gcc/config/riscv/riscv.c
--- dist.orig/gcc/config/riscv/riscv.c 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/riscv.c 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,4309 @@
+/* Subroutines used for code generation for RISC-V.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+ Based on MIPS target for GNU compiler.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-attr.h"
+#include "recog.h"
+#include "output.h"
+#include "tree.h"
+//#include "varasm.h"
+//#include "stor-layout.h"
+//#include "calls.h"
+#include "function.h"
+#include "expr.h"
+#include "optabs.h"
+#include "libfuncs.h"
+#include "flags.h"
+#include "reload.h"
+#include "tm_p.h"
+#include "ggc.h"
+#include "gstab.h"
+#include "hashtab.h"
+#include "debug.h"
+#include "target.h"
+#include "target-def.h"
+#include "langhooks.h"
+#include "sched-int.h"
+#include "bitmap.h"
+#include "diagnostic.h"
+#include "target-globals.h"
+#include "symcat.h"
+#include <stdint.h>
+
+/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
+#define UNSPEC_ADDRESS_P(X) \
+ (GET_CODE (X) == UNSPEC \
+ && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
+ && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
+
+/* Extract the symbol or label from UNSPEC wrapper X. */
+#define UNSPEC_ADDRESS(X) \
+ XVECEXP (X, 0, 0)
+
+/* Extract the symbol type from UNSPEC wrapper X. */
+#define UNSPEC_ADDRESS_TYPE(X) \
+ ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
+
+/* The maximum distance between the top of the stack frame and the
+ value sp has when we save and restore registers. This is set by the
+ range of load/store offsets and must also preserve stack alignment. */
+#define RISCV_MAX_FIRST_STACK_STEP (RISCV_IMM_REACH/2 - 16)
+
+/* True if INSN is a riscv.md pattern or asm statement. */
+#define USEFUL_INSN_P(INSN) \
+ (NONDEBUG_INSN_P (INSN) \
+ && GET_CODE (PATTERN (INSN)) != USE \
+ && GET_CODE (PATTERN (INSN)) != CLOBBER \
+ && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
+ && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
+
+/* True if bit BIT is set in VALUE. */
+#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
+
+/* Classifies an address.
+
+ ADDRESS_REG
+ A natural register + offset address. The register satisfies
+ riscv_valid_base_register_p and the offset is a const_arith_operand.
+
+ ADDRESS_LO_SUM
+ A LO_SUM rtx. The first operand is a valid base register and
+ the second operand is a symbolic address.
+
+ ADDRESS_CONST_INT
+ A signed 12-bit constant address.
+
+ ADDRESS_SYMBOLIC:
+ A constant symbolic address. */
+enum riscv_address_type {
+ ADDRESS_REG,
+ ADDRESS_LO_SUM,
+ ADDRESS_CONST_INT,
+ ADDRESS_SYMBOLIC
+};
+
+enum riscv_code_model riscv_cmodel = TARGET_DEFAULT_CMODEL;
+
+/* Macros to create an enumeration identifier for a function prototype. */
+#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B
+#define RISCV_FTYPE_NAME2(A, B, C) RISCV_##A##_FTYPE_##B##_##C
+#define RISCV_FTYPE_NAME3(A, B, C, D) RISCV_##A##_FTYPE_##B##_##C##_##D
+#define RISCV_FTYPE_NAME4(A, B, C, D, E) RISCV_##A##_FTYPE_##B##_##C##_##D##_##E
+
+/* Classifies the prototype of a built-in function. */
+enum riscv_function_type {
+#define DEF_RISCV_FTYPE(NARGS, LIST) RISCV_FTYPE_NAME##NARGS LIST,
+#include "config/riscv/riscv-ftypes.def"
+#undef DEF_RISCV_FTYPE
+ RISCV_MAX_FTYPE_MAX
+};
+
+/* Specifies how a built-in function should be converted into rtl. */
+enum riscv_builtin_type {
+ /* The function corresponds directly to an .md pattern. The return
+ value is mapped to operand 0 and the arguments are mapped to
+ operands 1 and above. */
+ RISCV_BUILTIN_DIRECT,
+
+ /* The function corresponds directly to an .md pattern. There is no return
+ value and the arguments are mapped to operands 0 and above. */
+ RISCV_BUILTIN_DIRECT_NO_TARGET
+};
+
+/* Information about a function's frame layout. */
+struct GTY(()) riscv_frame_info {
+ /* The size of the frame in bytes. */
+ HOST_WIDE_INT total_size;
+
+ /* Bit X is set if the function saves or restores GPR X. */
+ unsigned int mask;
+
+ /* Likewise FPR X. */
+ unsigned int fmask;
+
+ /* Offsets of fixed-point and floating-point save areas from frame bottom */
+ HOST_WIDE_INT gp_sp_offset;
+ HOST_WIDE_INT fp_sp_offset;
+
+ /* Offset of virtual frame pointer from stack pointer/frame bottom */
+ HOST_WIDE_INT frame_pointer_offset;
+
+ /* Offset of hard frame pointer from stack pointer/frame bottom */
+ HOST_WIDE_INT hard_frame_pointer_offset;
+
+ /* The offset of arg_pointer_rtx from the bottom of the frame. */
+ HOST_WIDE_INT arg_pointer_offset;
+};
+
+struct GTY(()) machine_function {
+ /* The number of extra stack bytes taken up by register varargs.
+ This area is allocated by the callee at the very top of the frame. */
+ int varargs_size;
+
+ /* The current frame information, calculated by riscv_compute_frame_info. */
+ struct riscv_frame_info frame;
+};
+
+/* Information about a single argument. */
+struct riscv_arg_info {
+ /* True if the argument is passed in a floating-point register, or
+ would have been if we hadn't run out of registers. */
+ bool fpr_p;
+
+ /* The number of words passed in registers, rounded up. */
+ unsigned int reg_words;
+
+ /* For EABI, the offset of the first register from GP_ARG_FIRST or
+ FP_ARG_FIRST. For other ABIs, the offset of the first register from
+ the start of the ABI's argument structure (see the CUMULATIVE_ARGS
+ comment for details).
+
+ The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
+ on the stack. */
+ unsigned int reg_offset;
+
+ /* The number of words that must be passed on the stack, rounded up. */
+ unsigned int stack_words;
+
+ /* The offset from the start of the stack overflow area of the argument's
+ first stack word. Only meaningful when STACK_WORDS is nonzero. */
+ unsigned int stack_offset;
+};
+
+/* Information about an address described by riscv_address_type.
+
+ ADDRESS_CONST_INT
+ No fields are used.
+
+ ADDRESS_REG
+ REG is the base register and OFFSET is the constant offset.
+
+ ADDRESS_LO_SUM
+ REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
+ is the type of symbol it references.
+
+ ADDRESS_SYMBOLIC
+ SYMBOL_TYPE is the type of symbol that the address references. */
+struct riscv_address_info {
+ enum riscv_address_type type;
+ rtx reg;
+ rtx offset;
+ enum riscv_symbol_type symbol_type;
+};
+
+/* One stage in a constant building sequence. These sequences have
+ the form:
+
+ A = VALUE[0]
+ A = A CODE[1] VALUE[1]
+ A = A CODE[2] VALUE[2]
+ ...
+
+ where A is an accumulator, each CODE[i] is a binary rtl operation
+ and each VALUE[i] is a constant integer. CODE[0] is undefined. */
+struct riscv_integer_op {
+ enum rtx_code code;
+ unsigned HOST_WIDE_INT value;
+};
+
+/* The largest number of operations needed to load an integer constant.
+ The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI,
+ but we may attempt and reject even worse sequences. */
+#define RISCV_MAX_INTEGER_OPS 32
+
+/* Costs of various operations on the different architectures. */
+
+struct riscv_tune_info
+{
+ unsigned short fp_add[2];
+ unsigned short fp_mul[2];
+ unsigned short fp_div[2];
+ unsigned short int_mul[2];
+ unsigned short int_div[2];
+ unsigned short issue_rate;
+ unsigned short branch_cost;
+ unsigned short fp_to_int_cost;
+ unsigned short memory_cost;
+};
+
+/* Information about one CPU we know about. */
+struct riscv_cpu_info {
+ /* This CPU's canonical name. */
+ const char *name;
+
+ /* The RISC-V ISA and extensions supported by this CPU. */
+ const char *isa;
+
+ /* Tuning parameters for this CPU. */
+ const struct riscv_tune_info *tune_info;
+};
+
+/* Global variables for machine-dependent things. */
+
+/* Which tuning parameters to use. */
+static const struct riscv_tune_info *tune_info;
+
+/* Index [M][R] is true if register R is allowed to hold a value of mode M. */
+bool riscv_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
+
+/* riscv_lo_relocs[X] is the relocation to use when a symbol of type X
+ appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
+ if they are matched by a special .md file pattern. */
+const char *riscv_lo_relocs[NUM_SYMBOL_TYPES];
+
+/* Likewise for HIGHs. */
+const char *riscv_hi_relocs[NUM_SYMBOL_TYPES];
+
+/* Index R is the smallest register class that contains register R. */
+const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
+ GR_REGS, T_REGS, T_REGS, T_REGS,
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
+ T_REGS, T_REGS, T_REGS, T_REGS,
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+ FRAME_REGS, FRAME_REGS,
+};
+
+/* Tuning costs for the Rocket microarchitecture. */
+static const struct riscv_tune_info rocket_tune_info = {
+ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_add */
+ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_mul */
+ {COSTS_N_INSNS (20), COSTS_N_INSNS (20)}, /* fp_div */
+ {COSTS_N_INSNS (4), COSTS_N_INSNS (4)}, /* int_mul */
+ {COSTS_N_INSNS (6), COSTS_N_INSNS (6)}, /* int_div */
+ 1, /* issue_rate */
+ 3, /* branch_cost */
+ COSTS_N_INSNS (2), /* fp_to_int_cost */
+ 5 /* memory_cost */
+};
+
+/* Costs to use when optimizing for size. */
+static const struct riscv_tune_info optimize_size_tune_info = {
+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_add */
+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_mul */
+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_div */
+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_mul */
+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_div */
+ 1, /* issue_rate */
+ 1, /* branch_cost */
+ COSTS_N_INSNS (1), /* fp_to_int_cost */
+ 1 /* memory_cost */
+};
+
+/* A table describing all the processors GCC knows about. */
+static const struct riscv_cpu_info riscv_cpu_info_table[] = {
+ /* Entries for generic ISAs. */
+ { "rocket", "IMAFD", &rocket_tune_info },
+};
+
+/* Return the riscv_cpu_info entry for the given name string. */
+
+static const struct riscv_cpu_info *
+riscv_parse_cpu (const char *cpu_string)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
+ if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
+ return riscv_cpu_info_table + i;
+
+ error ("unknown cpu `%s'", cpu_string);
+ return riscv_cpu_info_table;
+}
+
+/* Fill CODES with a sequence of rtl operations to load VALUE.
+ Return the number of operations needed. */
+
+static int
+riscv_build_integer_1 (struct riscv_integer_op *codes, HOST_WIDE_INT value,
+ enum machine_mode mode)
+{
+ HOST_WIDE_INT low_part = RISCV_CONST_LOW_PART (value);
+ int cost = INT_MAX, alt_cost;
+ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
+
+ if (SMALL_OPERAND (value) || LUI_OPERAND (value))
+ {
+ /* Simply ADDI or LUI */
+ codes[0].code = UNKNOWN;
+ codes[0].value = value;
+ return 1;
+ }
+
+ /* End with ADDI */
+ if (low_part != 0
+ && !(mode == HImode && (int16_t)(value - low_part) != (value - low_part)))
+ {
+ cost = 1 + riscv_build_integer_1 (codes, value - low_part, mode);
+ codes[cost-1].code = PLUS;
+ codes[cost-1].value = low_part;
+ }
+
+ /* End with XORI */
+ if (cost > 2 && (low_part < 0 || mode == HImode))
+ {
+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
+ alt_codes[alt_cost-1].code = XOR;
+ alt_codes[alt_cost-1].value = low_part;
+ if (alt_cost < cost)
+ cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
+ }
+
+ /* Eliminate trailing zeros and end with SLLI */
+ if (cost > 2 && (value & 1) == 0)
+ {
+ int shift = 0;
+ while ((value & 1) == 0)
+ shift++, value >>= 1;
+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value, mode);
+ alt_codes[alt_cost-1].code = ASHIFT;
+ alt_codes[alt_cost-1].value = shift;
+ if (alt_cost < cost)
+ cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
+ }
+
+ gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
+ return cost;
+}
+
+static int
+riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
+ enum machine_mode mode)
+{
+ int cost = riscv_build_integer_1 (codes, value, mode);
+
+ /* Eliminate leading zeros and end with SRLI */
+ if (value > 0 && cost > 2)
+ {
+ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
+ int alt_cost, shift = 0;
+ HOST_WIDE_INT shifted_val;
+
+ /* Try filling trailing bits with 1s */
+ while ((value << shift) >= 0)
+ shift++;
+ shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
+ alt_codes[alt_cost-1].code = LSHIFTRT;
+ alt_codes[alt_cost-1].value = shift;
+ if (alt_cost < cost)
+ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
+
+ /* Try filling trailing bits with 0s */
+ shifted_val = value << shift;
+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
+ alt_codes[alt_cost-1].code = LSHIFTRT;
+ alt_codes[alt_cost-1].value = shift;
+ if (alt_cost < cost)
+ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
+ }
+
+ return cost;
+}
+
+static int
+riscv_split_integer_cost (HOST_WIDE_INT val)
+{
+ int cost;
+ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
+
+ cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
+ if (loval != hival)
+ cost += riscv_build_integer (codes, hival, VOIDmode);
+
+ return cost;
+}
+
+static int
+riscv_integer_cost (HOST_WIDE_INT val)
+{
+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
+ return MIN (riscv_build_integer (codes, val, VOIDmode),
+ riscv_split_integer_cost (val));
+}
+
+/* Try to split a 64b integer into 32b parts, then reassemble. */
+
+static rtx
+riscv_split_integer (HOST_WIDE_INT val, enum machine_mode mode)
+{
+ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
+ rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);
+
+ riscv_move_integer (hi, hi, hival);
+ riscv_move_integer (lo, lo, loval);
+
+ hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
+ hi = force_reg (mode, hi);
+
+ return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
+}
+
+/* Return true if X is a thread-local symbol. */
+
+static bool
+riscv_tls_symbol_p (const_rtx x)
+{
+ return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
+}
+
+static bool
+riscv_symbol_binds_local_p (const_rtx x)
+{
+ return (SYMBOL_REF_DECL (x)
+ ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
+ : SYMBOL_REF_LOCAL_P (x));
+}
+
+/* Return the method that should be used to access SYMBOL_REF or
+ LABEL_REF X. */
+
+static enum riscv_symbol_type
+riscv_classify_symbol (const_rtx x)
+{
+ if (riscv_tls_symbol_p (x))
+ return SYMBOL_TLS;
+
+ if (GET_CODE (x) == LABEL_REF)
+ {
+ if (LABEL_REF_NONLOCAL_P (x))
+ return SYMBOL_GOT_DISP;
+ return SYMBOL_ABSOLUTE;
+ }
+
+ gcc_assert (GET_CODE (x) == SYMBOL_REF);
+
+ if (flag_pic && !riscv_symbol_binds_local_p (x))
+ return SYMBOL_GOT_DISP;
+
+ return SYMBOL_ABSOLUTE;
+}
+
+/* Classify the base of symbolic expression X, looking through any
+ UNSPEC address wrapper to find the underlying symbol type. */
+
+static enum riscv_symbol_type
+riscv_classify_symbolic_expression (rtx x)
+{
+ rtx offset;
+
+ split_const (x, &x, &offset);
+ if (UNSPEC_ADDRESS_P (x))
+ return UNSPEC_ADDRESS_TYPE (x);
+
+ return riscv_classify_symbol (x);
+}
+
+/* Return true if X is a symbolic constant that can be used in context
+ CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
+
+bool
+riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
+{
+ rtx offset;
+
+ split_const (x, &x, &offset);
+ if (UNSPEC_ADDRESS_P (x))
+ {
+ *symbol_type = UNSPEC_ADDRESS_TYPE (x);
+ x = UNSPEC_ADDRESS (x);
+ }
+ else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
+ *symbol_type = riscv_classify_symbol (x);
+ else
+ return false;
+
+ if (offset == const0_rtx)
+ return true;
+
+ /* Check whether a nonzero offset is valid for the underlying
+ relocations. */
+ switch (*symbol_type)
+ {
+ case SYMBOL_ABSOLUTE:
+ case SYMBOL_TLS_LE:
+ return (int32_t) INTVAL (offset) == INTVAL (offset);
+
+ default:
+ return false;
+ }
+ gcc_unreachable ();
+}
+
+/* Returns the number of instructions necessary to reference a symbol. */
+
+static int riscv_symbol_insns (enum riscv_symbol_type type)
+{
+ switch (type)
+ {
+ case SYMBOL_TLS: return 0; /* Depends on the TLS model. */
+ case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference itself */
+ case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference itself */
+ case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference itself */
+ default: gcc_unreachable();
+ }
+}
+
+/* A for_each_rtx callback. Stop the search if *X references a
+ thread-local symbol. */
+
+static int
+riscv_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+ return riscv_tls_symbol_p (*x);
+}
+
+/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
+
+static bool
+riscv_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+ return riscv_const_insns (x) > 0;
+}
+
+/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
+
+static bool
+riscv_cannot_force_const_mem (enum machine_mode mode, rtx x)
+{
+ enum riscv_symbol_type type;
+ rtx base, offset;
+
+ /* There is no assembler syntax for expressing an address-sized
+ high part. */
+ if (GET_CODE (x) == HIGH)
+ return true;
+
+ /* As an optimization, reject constants that riscv_legitimize_move
+ can expand inline.
+
+ Suppose we have a multi-instruction sequence that loads constant C
+ into register R. If R does not get allocated a hard register, and
+ R is used in an operand that allows both registers and memory
+ references, reload will consider forcing C into memory and using
+ one of the instruction's memory alternatives. Returning false
+ here will force it to use an input reload instead. */
+ if (CONST_INT_P (x) && riscv_legitimate_constant_p (mode, x))
+ return true;
+
+ split_const (x, &base, &offset);
+ if (riscv_symbolic_constant_p (base, &type))
+ {
+ /* The same optimization as for CONST_INT. */
+ if (SMALL_INT (offset) && riscv_symbol_insns (type) > 0)
+ return true;
+
+ /* It's not worth creating additional dynamic relocations. */
+ if (flag_pic)
+ return true;
+ }
+
+ /* TLS symbols must be computed by riscv_legitimize_move. */
+ if (for_each_rtx (&x, &riscv_tls_symbol_ref_1, NULL))
+ return true;
+
+ return false;
+}
+
+/* Return true if register REGNO is a valid base register for mode MODE.
+ STRICT_P is true if REG_OK_STRICT is in effect. */
+
+int
+riscv_regno_mode_ok_for_base_p (int regno, enum machine_mode mode ATTRIBUTE_UNUSED,
+ bool strict_p)
+{
+ if (!HARD_REGISTER_NUM_P (regno))
+ {
+ if (!strict_p)
+ return true;
+ regno = reg_renumber[regno];
+ }
+
+ /* These fake registers will be eliminated to either the stack or
+ hard frame pointer, both of which are usually valid base registers.
+ Reload deals with the cases where the eliminated form isn't valid. */
+ if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
+ return true;
+
+ return GP_REG_P (regno);
+}
+
+/* Return true if X is a valid base register for mode MODE.
+ STRICT_P is true if REG_OK_STRICT is in effect. */
+
+static bool
+riscv_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
+{
+ if (!strict_p && GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+
+ return (REG_P (x)
+ && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
+}
+
+/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
+ can address a value of mode MODE. */
+
+static bool
+riscv_valid_offset_p (rtx x, enum machine_mode mode)
+{
+ /* Check that X is a signed 12-bit number. */
+ if (!const_arith_operand (x, Pmode))
+ return false;
+
+ /* We may need to split multiword moves, so make sure that every word
+ is accessible. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
+ return false;
+
+ return true;
+}
+
+/* Return true if a LO_SUM can address a value of mode MODE when the
+ LO_SUM symbol has type SYMBOL_TYPE. */
+
+static bool
+riscv_valid_lo_sum_p (enum riscv_symbol_type symbol_type, enum machine_mode mode)
+{
+ /* Check that symbols of type SYMBOL_TYPE can be used to access values
+ of mode MODE. */
+ if (riscv_symbol_insns (symbol_type) == 0)
+ return false;
+
+ /* Check that there is a known low-part relocation. */
+ if (riscv_lo_relocs[symbol_type] == NULL)
+ return false;
+
+ /* We may need to split multiword moves, so make sure that each word
+ can be accessed without inducing a carry. This is mainly needed
+ for types whose alignment is smaller than their size, such as
+ 128-bit types aligned to only 64 bits. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
+ return false;
+
+ return true;
+}
+
+/* Return true if X is a valid address for machine mode MODE. If it is,
+ fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
+ effect. */
+
+static bool
+riscv_classify_address (struct riscv_address_info *info, rtx x,
+ enum machine_mode mode, bool strict_p)
+{
+ switch (GET_CODE (x))
+ {
+ case REG:
+ case SUBREG:
+ info->type = ADDRESS_REG;
+ info->reg = x;
+ info->offset = const0_rtx;
+ return riscv_valid_base_register_p (info->reg, mode, strict_p);
+
+ case PLUS:
+ info->type = ADDRESS_REG;
+ info->reg = XEXP (x, 0);
+ info->offset = XEXP (x, 1);
+ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
+ && riscv_valid_offset_p (info->offset, mode));
+
+ case LO_SUM:
+ info->type = ADDRESS_LO_SUM;
+ info->reg = XEXP (x, 0);
+ info->offset = XEXP (x, 1);
+ /* We have to trust the creator of the LO_SUM to do something vaguely
+ sane. Target-independent code that creates a LO_SUM should also
+ create and verify the matching HIGH. Target-independent code that
+ adds an offset to a LO_SUM must prove that the offset will not
+ induce a carry. Failure to do either of these things would be
+ a bug, and we are not required to check for it here. The RISCV
+ backend itself should only create LO_SUMs for valid symbolic
+ constants, with the high part being either a HIGH or a copy
+ of _gp. */
+ info->symbol_type
+ = riscv_classify_symbolic_expression (info->offset);
+ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
+ && riscv_valid_lo_sum_p (info->symbol_type, mode));
+
+ case CONST_INT:
+ /* Small-integer addresses don't occur very often, but they
+ are legitimate if $0 is a valid base register. */
+ info->type = ADDRESS_CONST_INT;
+ return SMALL_INT (x);
+
+ default:
+ return false;
+ }
+}
+
+/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
+
+static bool
+riscv_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
+{
+ struct riscv_address_info addr;
+
+ return riscv_classify_address (&addr, x, mode, strict_p);
+}
+
+/* Return the number of instructions needed to load or store a value
+ of mode MODE at address X. Return 0 if X isn't valid for MODE.
+ Assume that multiword moves may need to be split into word moves
+ if MIGHT_SPLIT_P, otherwise assume that a single load or store is
+ enough. */
+
+int
+riscv_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
+{
+ struct riscv_address_info addr;
+ int n = 1;
+
+ if (!riscv_classify_address (&addr, x, mode, false))
+ return 0;
+
+ /* BLKmode is used for single unaligned loads and stores and should
+ not count as a multiword mode. */
+ if (mode != BLKmode && might_split_p)
+ n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+ if (addr.type == ADDRESS_LO_SUM)
+ n += riscv_symbol_insns (addr.symbol_type) - 1;
+
+ return n;
+}
+
+/* Return the number of instructions needed to load constant X.
+ Return 0 if X isn't a valid constant. */
+
+int
+riscv_const_insns (rtx x)
+{
+ enum riscv_symbol_type symbol_type;
+ rtx offset;
+
+ switch (GET_CODE (x))
+ {
+ case HIGH:
+ if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
+ || !riscv_hi_relocs[symbol_type])
+ return 0;
+
+ /* This is simply an LUI. */
+ return 1;
+
+ case CONST_INT:
+ {
+ int cost = riscv_integer_cost (INTVAL (x));
+ /* Force complicated constants to memory. */
+ return cost < 4 ? cost : 0;
+ }
+
+ case CONST_DOUBLE:
+ case CONST_VECTOR:
+ /* Allow zeros for normal mode, where we can use x0. */
+ return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
+
+ case CONST:
+ /* See if we can refer to X directly. */
+ if (riscv_symbolic_constant_p (x, &symbol_type))
+ return riscv_symbol_insns (symbol_type);
+
+ /* Otherwise try splitting the constant into a base and offset.
+ If the offset is a 12-bit value, we can load the base address
+ into a register and then use ADDI to add in the offset.
+ If the offset is larger, we can load the base and offset
+ into separate registers and add them together with ADD.
+ However, the latter is only possible before reload; during
+ and after reload, we must have the option of forcing the
+ constant into the pool instead. */
+ split_const (x, &x, &offset);
+ if (offset != 0)
+ {
+ int n = riscv_const_insns (x);
+ if (n != 0)
+ {
+ if (SMALL_INT (offset))
+ return n + 1;
+ else if (!targetm.cannot_force_const_mem (GET_MODE (x), x))
+ return n + 1 + riscv_integer_cost (INTVAL (offset));
+ }
+ }
+ return 0;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return riscv_symbol_insns (riscv_classify_symbol (x));
+
+ default:
+ return 0;
+ }
+}
+
+/* X is a doubleword constant that can be handled by splitting it into
+ two words and loading each word separately. Return the number of
+ instructions required to do this. */
+
+int
+riscv_split_const_insns (rtx x)
+{
+ unsigned int low, high;
+
+ low = riscv_const_insns (riscv_subword (x, false));
+ high = riscv_const_insns (riscv_subword (x, true));
+ gcc_assert (low > 0 && high > 0);
+ return low + high;
+}
+
+/* Return the number of instructions needed to implement INSN,
+ given that it loads from or stores to MEM. */
+
+int
+riscv_load_store_insns (rtx mem, rtx insn)
+{
+ enum machine_mode mode;
+ bool might_split_p;
+ rtx set;
+
+ gcc_assert (MEM_P (mem));
+ mode = GET_MODE (mem);
+
+ /* Try to prove that INSN does not need to be split. */
+ might_split_p = true;
+ if (GET_MODE_BITSIZE (mode) == 64)
+ {
+ set = single_set (insn);
+ if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
+ might_split_p = false;
+ }
+
+ return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
+}
+
+/* Emit a move from SRC to DEST. Assume that the move expanders can
+ handle all moves if !can_create_pseudo_p (). The distinction is
+ important because, unlike emit_move_insn, the move expanders know
+ how to force Pmode objects into the constant pool even when the
+ constant pool address is not itself legitimate. */
+
+rtx
+riscv_emit_move (rtx dest, rtx src)
+{
+ return (can_create_pseudo_p ()
+ ? emit_move_insn (dest, src)
+ : emit_move_insn_1 (dest, src));
+}
+
+/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
+
+static void
+riscv_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
+{
+ emit_insn (gen_rtx_SET (VOIDmode, target,
+ gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
+}
+
+/* Compute (CODE OP0 OP1) and store the result in a new register
+ of mode MODE. Return that new register. */
+
+static rtx
+riscv_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
+{
+ rtx reg;
+
+ reg = gen_reg_rtx (mode);
+ riscv_emit_binary (code, reg, op0, op1);
+ return reg;
+}
+
+/* Copy VALUE to a register and return that register. If new pseudos
+ are allowed, copy it into a new register, otherwise use DEST. */
+
+static rtx
+riscv_force_temporary (rtx dest, rtx value)
+{
+ if (can_create_pseudo_p ())
+ return force_reg (Pmode, value);
+ else
+ {
+ riscv_emit_move (dest, value);
+ return dest;
+ }
+}
+
+/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
+ then add CONST_INT OFFSET to the result. */
+
+static rtx
+riscv_unspec_address_offset (rtx base, rtx offset,
+ enum riscv_symbol_type symbol_type)
+{
+ base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
+ UNSPEC_ADDRESS_FIRST + symbol_type);
+ if (offset != const0_rtx)
+ base = gen_rtx_PLUS (Pmode, base, offset);
+ return gen_rtx_CONST (Pmode, base);
+}
+
+/* Return an UNSPEC address with underlying address ADDRESS and symbol
+ type SYMBOL_TYPE. */
+
+rtx
+riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
+{
+ rtx base, offset;
+
+ split_const (address, &base, &offset);
+ return riscv_unspec_address_offset (base, offset, symbol_type);
+}
+
+/* If OP is an UNSPEC address, return the address to which it refers,
+ otherwise return OP itself. */
+
+static rtx
+riscv_strip_unspec_address (rtx op)
+{
+ rtx base, offset;
+
+ split_const (op, &base, &offset);
+ if (UNSPEC_ADDRESS_P (base))
+ op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
+ return op;
+}
+
+/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
+ high part to BASE and return the result. Just return BASE otherwise.
+ TEMP is as for riscv_force_temporary.
+
+ The returned expression can be used as the first operand to a LO_SUM. */
+
+static rtx
+riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
+{
+ addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
+ return riscv_force_temporary (temp, addr);
+}
+
+/* Load an entry from the GOT. */
+static rtx riscv_got_load_tls_gd(rtx dest, rtx sym)
+{
+ return (Pmode == DImode ? gen_got_load_tls_gddi(dest, sym) : gen_got_load_tls_gdsi(dest, sym));
+}
+
+static rtx riscv_got_load_tls_ie(rtx dest, rtx sym)
+{
+ return (Pmode == DImode ? gen_got_load_tls_iedi(dest, sym) : gen_got_load_tls_iesi(dest, sym));
+}
+
+static rtx riscv_tls_add_tp_le(rtx dest, rtx base, rtx sym)
+{
+ rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
+ return (Pmode == DImode ? gen_tls_add_tp_ledi(dest, base, tp, sym) : gen_tls_add_tp_lesi(dest, base, tp, sym));
+}
+
+/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
+ it appears in a MEM of that mode. Return true if ADDR is a legitimate
+ constant in that context and can be split into high and low parts.
+ If so, and if LOW_OUT is nonnull, emit the high part and store the
+ low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
+
+ TEMP is as for riscv_force_temporary and is used to load the high
+ part into a register.
+
+ When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
+ a legitimate SET_SRC for an .md pattern, otherwise the low part
+ is guaranteed to be a legitimate address for mode MODE. */
+
+bool
+riscv_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
+{
+ enum riscv_symbol_type symbol_type;
+ rtx high;
+
+ if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
+ || !riscv_symbolic_constant_p (addr, &symbol_type)
+ || riscv_symbol_insns (symbol_type) == 0
+ || !riscv_hi_relocs[symbol_type])
+ return false;
+
+ if (low_out)
+ {
+ switch (symbol_type)
+ {
+ case SYMBOL_ABSOLUTE:
+ high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
+ high = riscv_force_temporary (temp, high);
+ *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ return true;
+}
+
+/* Return a legitimate address for REG + OFFSET. TEMP is as for
+ riscv_force_temporary; it is only needed when OFFSET is not a
+ SMALL_OPERAND. */
+
+static rtx
+riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
+{
+ if (!SMALL_OPERAND (offset))
+ {
+ rtx high;
+
+ /* Leave OFFSET as a 12-bit offset and put the excess in HIGH.
+ The addition inside the macro CONST_HIGH_PART may cause an
+ overflow, so we need to force a sign-extension check. */
+ high = gen_int_mode (RISCV_CONST_HIGH_PART (offset), Pmode);
+ offset = RISCV_CONST_LOW_PART (offset);
+ high = riscv_force_temporary (temp, high);
+ reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
+ }
+ return plus_constant (Pmode, reg, offset);
+}
+
+/* The __tls_get_addr symbol. */
+static GTY(()) rtx riscv_tls_symbol;
+
+/* Return an instruction sequence that calls __tls_get_addr. SYM is
+ the TLS symbol we are referencing, which is accessed using the
+ global dynamic TLS model. RESULT is an RTX for the
+ return value location. */
+
+static rtx
+riscv_call_tls_get_addr (rtx sym, rtx result)
+{
+ rtx insn, a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
+
+ if (!riscv_tls_symbol)
+ riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
+
+ start_sequence ();
+
+ emit_insn (riscv_got_load_tls_gd (a0, sym));
+ insn = riscv_expand_call (false, result, riscv_tls_symbol, const0_rtx);
+ RTL_CONST_CALL_P (insn) = 1;
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
+ insn = get_insns ();
+
+ end_sequence ();
+
+ return insn;
+}
+
+/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
+ its address. The return value will be both a valid address and a valid
+ SET_SRC (either a REG or a LO_SUM). */
+
+static rtx
+riscv_legitimize_tls_address (rtx loc)
+{
+ rtx dest, insn, tp, tmp1;
+ enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
+
+ /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. */
+ if (!flag_pic)
+ model = TLS_MODEL_LOCAL_EXEC;
+
+ switch (model)
+ {
+ case TLS_MODEL_LOCAL_DYNAMIC:
+ /* Rely on section anchors for the optimization that LDM TLS
+ provides. The anchor's address is loaded with GD TLS. */
+ case TLS_MODEL_GLOBAL_DYNAMIC:
+ tmp1 = gen_rtx_REG (Pmode, GP_RETURN);
+ insn = riscv_call_tls_get_addr (loc, tmp1);
+ dest = gen_reg_rtx (Pmode);
+ emit_libcall_block (insn, dest, tmp1, loc);
+ break;
+
+ case TLS_MODEL_INITIAL_EXEC:
+ /* la.tls.ie; tp-relative add */
+ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
+ tmp1 = gen_reg_rtx (Pmode);
+ emit_insn (riscv_got_load_tls_ie (tmp1, loc));
+ dest = gen_reg_rtx (Pmode);
+ emit_insn (gen_add3_insn (dest, tmp1, tp));
+ break;
+
+ case TLS_MODEL_LOCAL_EXEC:
+ tmp1 = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
+ dest = gen_reg_rtx (Pmode);
+ emit_insn (riscv_tls_add_tp_le (dest, tmp1, loc));
+ dest = gen_rtx_LO_SUM (Pmode, dest,
+ riscv_unspec_address (loc, SYMBOL_TLS_LE));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ return dest;
+}
+
+/* If X is not a valid address for mode MODE, force it into a register. */
+
+static rtx
+riscv_force_address (rtx x, enum machine_mode mode)
+{
+ if (!riscv_legitimate_address_p (mode, x, false))
+ x = force_reg (Pmode, x);
+ return x;
+}
+
+/* This function is used to implement LEGITIMIZE_ADDRESS. If X can
+ be legitimized in a way that the generic machinery might not expect,
+ return a new address, otherwise return NULL. MODE is the mode of
+ the memory being accessed. */
+
+static rtx
+riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+ enum machine_mode mode)
+{
+ rtx addr;
+
+ if (riscv_tls_symbol_p (x))
+ return riscv_legitimize_tls_address (x);
+
+ /* See if the address can split into a high part and a LO_SUM. */
+ if (riscv_split_symbol (NULL, x, mode, &addr))
+ return riscv_force_address (addr, mode);
+
+ /* Handle BASE + OFFSET using riscv_add_offset. */
+ if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
+ && INTVAL (XEXP (x, 1)) != 0)
+ {
+ rtx base = XEXP (x, 0);
+ HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
+
+ if (!riscv_valid_base_register_p (base, mode, false))
+ base = copy_to_mode_reg (Pmode, base);
+ addr = riscv_add_offset (NULL, base, offset);
+ return riscv_force_address (addr, mode);
+ }
+
+ return x;
+}
+
+/* Load VALUE into DEST. TEMP is as for riscv_force_temporary. */
+
+void
+riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
+{
+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
+ enum machine_mode mode;
+ int i, num_ops;
+ rtx x;
+
+ mode = GET_MODE (dest);
+ num_ops = riscv_build_integer (codes, value, mode);
+
+ if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
+ && num_ops >= riscv_split_integer_cost (value))
+ x = riscv_split_integer (value, mode);
+ else
+ {
+ /* Apply each binary operation to X. */
+ x = GEN_INT (codes[0].value);
+
+ for (i = 1; i < num_ops; i++)
+ {
+ if (!can_create_pseudo_p ())
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, temp, x));
+ x = temp;
+ }
+ else
+ x = force_reg (mode, x);
+
+ x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
+ }
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+}
+
+/* Subroutine of riscv_legitimize_move. Move constant SRC into register
+ DEST given that SRC satisfies immediate_operand but doesn't satisfy
+ move_operand. */
+
+static void
+riscv_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
+{
+ rtx base, offset;
+
+ /* Split moves of big integers into smaller pieces. */
+ if (splittable_const_int_operand (src, mode))
+ {
+ riscv_move_integer (dest, dest, INTVAL (src));
+ return;
+ }
+
+ /* Split moves of symbolic constants into high/low pairs. */
+ if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, dest, src));
+ return;
+ }
+
+ /* Generate the appropriate access sequences for TLS symbols. */
+ if (riscv_tls_symbol_p (src))
+ {
+ riscv_emit_move (dest, riscv_legitimize_tls_address (src));
+ return;
+ }
+
+ /* If we have (const (plus symbol offset)), and that expression cannot
+ be forced into memory, load the symbol first and add in the offset. Also
+ prefer to do this even if the constant _can_ be forced into memory, as it
+ usually produces better code. */
+ split_const (src, &base, &offset);
+ if (offset != const0_rtx
+ && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
+ {
+ base = riscv_force_temporary (dest, base);
+ riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
+ return;
+ }
+
+ src = force_const_mem (mode, src);
+
+ /* When using explicit relocs, constant pool references are sometimes
+ not legitimate addresses. */
+ riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
+ riscv_emit_move (dest, src);
+}
+
+/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
+ sequence that is valid. */
+
+bool
+riscv_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
+{
+ if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
+ {
+ riscv_emit_move (dest, force_reg (mode, src));
+ return true;
+ }
+
+ /* We need to deal with constants that would be legitimate
+ immediate_operands but aren't legitimate move_operands. */
+ if (CONSTANT_P (src) && !move_operand (src, mode))
+ {
+ riscv_legitimize_const_move (mode, dest, src);
+ set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
+ return true;
+ }
+ return false;
+}
+
+/* Return true if there is an instruction that implements CODE and accepts
+ X as an immediate operand. */
+
+static int
+riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
+{
+ switch (code)
+ {
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ /* All shift counts are truncated to a valid constant. */
+ return true;
+
+ case AND:
+ case IOR:
+ case XOR:
+ case PLUS:
+ case LT:
+ case LTU:
+ /* These instructions take 12-bit signed immediates. */
+ return SMALL_OPERAND (x);
+
+ case LE:
+ /* We add 1 to the immediate and use SLT. */
+ return SMALL_OPERAND (x + 1);
+
+ case LEU:
+ /* Likewise SLTU, but reject the always-true case. */
+ return SMALL_OPERAND (x + 1) && x + 1 != 0;
+
+ case GE:
+ case GEU:
+ /* We can emulate an immediate of 1 by using GT/GTU against x0. */
+ return x == 1;
+
+ default:
+ /* By default assume that x0 can be used for 0. */
+ return x == 0;
+ }
+}
+
+/* Return the cost of binary operation X, given that the instruction
+ sequence for a word-sized or smaller operation takes SINGLE_INSNS
+ instructions and that the sequence of a double-word operation takes
+ DOUBLE_INSNS instructions. */
+
+static int
+riscv_binary_cost (rtx x, int single_insns, int double_insns)
+{
+ if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
+ return COSTS_N_INSNS (double_insns);
+ return COSTS_N_INSNS (single_insns);
+}
+
+/* Return the cost of sign-extending OP to mode MODE, not including the
+ cost of OP itself. */
+
+static int
+riscv_sign_extend_cost (enum machine_mode mode, rtx op)
+{
+ if (MEM_P (op))
+ /* Extended loads are as cheap as unextended ones. */
+ return 0;
+
+ if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
+ /* A sign extension from SImode to DImode in 64-bit mode is free. */
+ return 0;
+
+ /* We need to use a shift left and a shift right. */
+ return COSTS_N_INSNS (2);
+}
+
+/* Return the cost of zero-extending OP to mode MODE, not including the
+ cost of OP itself. */
+
+static int
+riscv_zero_extend_cost (enum machine_mode mode, rtx op)
+{
+ if (MEM_P (op))
+ /* Extended loads are as cheap as unextended ones. */
+ return 0;
+
+ if ((TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) ||
+ ((mode == DImode || mode == SImode) && GET_MODE (op) == HImode))
+ /* We need a shift left by 32 bits and a shift right by 32 bits. */
+ return COSTS_N_INSNS (2);
+
+ /* We can use ANDI. */
+ return COSTS_N_INSNS (1);
+}
+
+/* Implement TARGET_RTX_COSTS. */
+
+static bool
+riscv_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
+ int *total, bool speed)
+{
+ enum machine_mode mode = GET_MODE (x);
+ bool float_mode_p = FLOAT_MODE_P (mode);
+ int cost;
+
+ switch (code)
+ {
+ case CONST_INT:
+ if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
+ {
+ *total = 0;
+ return true;
+ }
+ /* Fall through. */
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST_DOUBLE:
+ case CONST:
+ if (speed)
+ *total = 1;
+ else if ((cost = riscv_const_insns (x)) > 0)
+ *total = COSTS_N_INSNS (cost);
+ else /* The instruction will be fetched from the constant pool. */
+ *total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
+ return true;
+
+ case MEM:
+ /* If the address is legitimate, return the number of
+ instructions it needs. */
+ if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
+ {
+ *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
+ return true;
+ }
+ /* Otherwise use the default handling. */
+ return false;
+
+ case NOT:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
+ return false;
+
+ case AND:
+ case IOR:
+ case XOR:
+ /* Double-word operations use two single-word operations. */
+ *total = riscv_binary_cost (x, 1, 2);
+ return false;
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ *total = riscv_binary_cost (x, 1, CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
+ return false;
+
+ case ABS:
+ *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
+ return false;
+
+ case LO_SUM:
+ *total = set_src_cost (XEXP (x, 0), speed);
+ return true;
+
+ case LT:
+ case LTU:
+ case LE:
+ case LEU:
+ case GT:
+ case GTU:
+ case GE:
+ case GEU:
+ case EQ:
+ case NE:
+ case UNORDERED:
+ case LTGT:
+ /* Branch comparisons have VOIDmode, so use the first operand's
+ mode instead. */
+ mode = GET_MODE (XEXP (x, 0));
+ if (FLOAT_MODE_P (mode))
+ *total = tune_info->fp_add[mode == DFmode];
+ else
+ *total = riscv_binary_cost (x, 1, 3);
+ return false;
+
+ case MINUS:
+ if (float_mode_p
+ && !HONOR_NANS (mode)
+ && !HONOR_SIGNED_ZEROS (mode))
+ {
+ /* See if we can use NMADD or NMSUB. See riscv.md for the
+ associated patterns. */
+ rtx op0 = XEXP (x, 0);
+ rtx op1 = XEXP (x, 1);
+ if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
+ {
+ *total = (tune_info->fp_mul[mode == DFmode]
+ + set_src_cost (XEXP (XEXP (op0, 0), 0), speed)
+ + set_src_cost (XEXP (op0, 1), speed)
+ + set_src_cost (op1, speed));
+ return true;
+ }
+ if (GET_CODE (op1) == MULT)
+ {
+ *total = (tune_info->fp_mul[mode == DFmode]
+ + set_src_cost (op0, speed)
+ + set_src_cost (XEXP (op1, 0), speed)
+ + set_src_cost (XEXP (op1, 1), speed));
+ return true;
+ }
+ }
+ /* Fall through. */
+
+ case PLUS:
+ if (float_mode_p)
+ *total = tune_info->fp_add[mode == DFmode];
+ else
+ *total = riscv_binary_cost (x, 1, 4);
+ return false;
+
+ case NEG:
+ if (float_mode_p
+ && !HONOR_NANS (mode)
+ && !HONOR_SIGNED_ZEROS (mode))
+ {
+ /* See if we can use NMADD or NMSUB. See riscv.md for the
+ associated patterns. */
+ rtx op = XEXP (x, 0);
+ if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
+ && GET_CODE (XEXP (op, 0)) == MULT)
+ {
+ *total = (tune_info->fp_mul[mode == DFmode]
+ + set_src_cost (XEXP (XEXP (op, 0), 0), speed)
+ + set_src_cost (XEXP (XEXP (op, 0), 1), speed)
+ + set_src_cost (XEXP (op, 1), speed));
+ return true;
+ }
+ }
+
+ if (float_mode_p)
+ *total = tune_info->fp_add[mode == DFmode];
+ else
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
+ return false;
+
+ case MULT:
+ if (float_mode_p)
+ *total = tune_info->fp_mul[mode == DFmode];
+ else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ *total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
+ else if (!speed)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = tune_info->int_mul[mode == DImode];
+ return false;
+
+ case DIV:
+ case SQRT:
+ case MOD:
+ if (float_mode_p)
+ {
+ *total = tune_info->fp_div[mode == DFmode];
+ return false;
+ }
+ /* Fall through. */
+
+ case UDIV:
+ case UMOD:
+ if (speed)
+ *total = tune_info->int_div[mode == DImode];
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case SIGN_EXTEND:
+ *total = riscv_sign_extend_cost (mode, XEXP (x, 0));
+ return false;
+
+ case ZERO_EXTEND:
+ *total = riscv_zero_extend_cost (mode, XEXP (x, 0));
+ return false;
+
+ case FLOAT:
+ case UNSIGNED_FLOAT:
+ case FIX:
+ case FLOAT_EXTEND:
+ case FLOAT_TRUNCATE:
+ *total = tune_info->fp_add[mode == DFmode];
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+/* Implement TARGET_ADDRESS_COST. */
+
+static int
+riscv_address_cost (rtx addr, enum machine_mode mode,
+ addr_space_t as ATTRIBUTE_UNUSED,
+ bool speed ATTRIBUTE_UNUSED)
+{
+ return riscv_address_insns (addr, mode, false);
+}
+
+/* Return one word of double-word value OP. HIGH_P is true to select the
+ high part or false to select the low part. */
+
+rtx
+riscv_subword (rtx op, bool high_p)
+{
+ unsigned int byte;
+ enum machine_mode mode;
+
+ mode = GET_MODE (op);
+ if (mode == VOIDmode)
+ mode = TARGET_64BIT ? TImode : DImode;
+
+ byte = high_p ? UNITS_PER_WORD : 0;
+
+ if (FP_REG_RTX_P (op))
+ return gen_rtx_REG (word_mode, REGNO (op) + high_p);
+
+ if (MEM_P (op))
+ return adjust_address (op, word_mode, byte);
+
+ return simplify_gen_subreg (word_mode, op, mode, byte);
+}
+
+/* Return true if a 64-bit move from SRC to DEST should be split into two. */
+
+bool
+riscv_split_64bit_move_p (rtx dest, rtx src)
+{
+ /* All 64b moves are legal in 64b mode. All 64b FPR <-> FPR and
+ FPR <-> MEM moves are legal in 32b mode, too. Although
+ FPR <-> GPR moves are not available in general in 32b mode,
+ we can at least load 0 into an FPR with fcvt.d.w fpr, x0. */
+ return !(TARGET_64BIT
+ || (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
+ || (FP_REG_RTX_P (dest) && MEM_P (src))
+ || (FP_REG_RTX_P (src) && MEM_P (dest))
+ || (FP_REG_RTX_P(dest) && src == CONST0_RTX(GET_MODE(src))));
+}
+
+/* Split a doubleword move from SRC to DEST. On 32-bit targets,
+ this function handles 64-bit moves for which riscv_split_64bit_move_p
+ holds. For 64-bit targets, this function handles 128-bit moves. */
+
+void
+riscv_split_doubleword_move (rtx dest, rtx src)
+{
+ rtx low_dest;
+
+ /* The operation can be split into two normal moves. Decide in
+ which order to do them. */
+ low_dest = riscv_subword (dest, false);
+ if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
+ {
+ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
+ riscv_emit_move (low_dest, riscv_subword (src, false));
+ }
+ else
+ {
+ riscv_emit_move (low_dest, riscv_subword (src, false));
+ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
+ }
+}
+
+/* Return the appropriate instructions to move SRC into DEST. Assume
+ that SRC is operand 1 and DEST is operand 0. */
+
+const char *
+riscv_output_move (rtx dest, rtx src)
+{
+ enum rtx_code dest_code, src_code;
+ enum machine_mode mode;
+ bool dbl_p;
+
+ dest_code = GET_CODE (dest);
+ src_code = GET_CODE (src);
+ mode = GET_MODE (dest);
+ dbl_p = (GET_MODE_SIZE (mode) == 8);
+
+ if (dbl_p && riscv_split_64bit_move_p (dest, src))
+ return "#";
+
+ if ((src_code == REG && GP_REG_P (REGNO (src)))
+ || (src == CONST0_RTX (mode)))
+ {
+ if (dest_code == REG)
+ {
+ if (GP_REG_P (REGNO (dest)))
+ return "mv\t%0,%z1";
+
+ if (FP_REG_P (REGNO (dest)))
+ {
+ if (!dbl_p)
+ return "fmv.s.x\t%0,%z1";
+ if (TARGET_64BIT)
+ return "fmv.d.x\t%0,%z1";
+ /* in RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w */
+ gcc_assert (src == CONST0_RTX (mode));
+ return "fcvt.d.w\t%0,x0";
+ }
+ }
+ if (dest_code == MEM)
+ switch (GET_MODE_SIZE (mode))
+ {
+ case 1: return "sb\t%z1,%0";
+ case 2: return "sh\t%z1,%0";
+ case 4: return "sw\t%z1,%0";
+ case 8: return "sd\t%z1,%0";
+ }
+ }
+ if (dest_code == REG && GP_REG_P (REGNO (dest)))
+ {
+ if (src_code == REG)
+ {
+ if (FP_REG_P (REGNO (src)))
+ return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";
+ }
+
+ if (src_code == MEM)
+ switch (GET_MODE_SIZE (mode))
+ {
+ case 1: return "lbu\t%0,%1";
+ case 2: return "lhu\t%0,%1";
+ case 4: return "lw\t%0,%1";
+ case 8: return "ld\t%0,%1";
+ }
+
+ if (src_code == CONST_INT)
+ return "li\t%0,%1";
+
+ if (src_code == HIGH)
+ return "lui\t%0,%h1";
+
+ if (symbolic_operand (src, VOIDmode))
+ switch (riscv_classify_symbolic_expression (src))
+ {
+ case SYMBOL_GOT_DISP: return "la\t%0,%1";
+ case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
+ default: gcc_unreachable();
+ }
+ }
+ if (src_code == REG && FP_REG_P (REGNO (src)))
+ {
+ if (dest_code == REG && FP_REG_P (REGNO (dest)))
+ return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
+
+ if (dest_code == MEM)
+ return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
+ }
+ if (dest_code == REG && FP_REG_P (REGNO (dest)))
+ {
+ if (src_code == MEM)
+ return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
+ }
+ gcc_unreachable ();
+}
+
+/* Return true if CMP1 is a suitable second operand for integer ordering
+ test CODE. See also the *sCC patterns in riscv.md. */
+
+static bool
+riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
+{
+ switch (code)
+ {
+ case GT:
+ case GTU:
+ return reg_or_0_operand (cmp1, VOIDmode);
+
+ case GE:
+ case GEU:
+ return cmp1 == const1_rtx;
+
+ case LT:
+ case LTU:
+ return arith_operand (cmp1, VOIDmode);
+
+ case LE:
+ return sle_operand (cmp1, VOIDmode);
+
+ case LEU:
+ return sleu_operand (cmp1, VOIDmode);
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return true if *CMP1 (of mode MODE) is a valid second operand for
+ integer ordering test *CODE, or if an equivalent combination can
+ be formed by adjusting *CODE and *CMP1. When returning true, update
+ *CODE and *CMP1 with the chosen code and operand, otherwise leave
+ them alone. */
+
+static bool
+riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
+ enum machine_mode mode)
+{
+ HOST_WIDE_INT plus_one;
+
+ if (riscv_int_order_operand_ok_p (*code, *cmp1))
+ return true;
+
+ if (CONST_INT_P (*cmp1))
+ switch (*code)
+ {
+ case LE:
+ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
+ if (INTVAL (*cmp1) < plus_one)
+ {
+ *code = LT;
+ *cmp1 = force_reg (mode, GEN_INT (plus_one));
+ return true;
+ }
+ break;
+
+ case LEU:
+ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
+ if (plus_one != 0)
+ {
+ *code = LTU;
+ *cmp1 = force_reg (mode, GEN_INT (plus_one));
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return false;
+}
+
+/* Compare CMP0 and CMP1 using ordering test CODE and store the result
+ in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
+ is nonnull, it's OK to set TARGET to the inverse of the result and
+ flip *INVERT_PTR instead. */
+
+static void
+riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
+ rtx target, rtx cmp0, rtx cmp1)
+{
+ enum machine_mode mode;
+
+ /* First see if there is a RISCV instruction that can do this operation.
+ If not, try doing the same for the inverse operation. If that also
+ fails, force CMP1 into a register and try again. */
+ mode = GET_MODE (cmp0);
+ if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
+ riscv_emit_binary (code, target, cmp0, cmp1);
+ else
+ {
+ enum rtx_code inv_code = reverse_condition (code);
+ if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
+ {
+ cmp1 = force_reg (mode, cmp1);
+ riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
+ }
+ else if (invert_ptr == 0)
+ {
+ rtx inv_target;
+
+ inv_target = riscv_force_binary (GET_MODE (target),
+ inv_code, cmp0, cmp1);
+ riscv_emit_binary (XOR, target, inv_target, const1_rtx);
+ }
+ else
+ {
+ *invert_ptr = !*invert_ptr;
+ riscv_emit_binary (inv_code, target, cmp0, cmp1);
+ }
+ }
+}
+
+/* Return a register that is zero iff CMP0 and CMP1 are equal.
+ The register will have the same mode as CMP0. */
+
+static rtx
+riscv_zero_if_equal (rtx cmp0, rtx cmp1)
+{
+ if (cmp1 == const0_rtx)
+ return cmp0;
+
+ return expand_binop (GET_MODE (cmp0), sub_optab,
+ cmp0, cmp1, 0, 0, OPTAB_DIRECT);
+}
+
+/* Return false if we can easily emit code for the FP comparison specified
+ by *CODE. If not, set *CODE to its inverse and return true. */
+
+static bool
+riscv_reversed_fp_cond (enum rtx_code *code)
+{
+ switch (*code)
+ {
+ case EQ:
+ case LT:
+ case LE:
+ case GT:
+ case GE:
+ case LTGT:
+ case ORDERED:
+ /* We know how to emit code for these cases... */
+ return false;
+
+ default:
+ /* ...but we must invert these and rely on the others. */
+ *code = reverse_condition_maybe_unordered (*code);
+ return true;
+ }
+}
+
+/* Convert a comparison into something that can be used in a branch or
+ conditional move. On entry, *OP0 and *OP1 are the values being
+ compared and *CODE is the code used to compare them.
+
+ Update *CODE, *OP0 and *OP1 so that they describe the final comparison. */
+
+static void
+riscv_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1)
+{
+ rtx cmp_op0 = *op0;
+ rtx cmp_op1 = *op1;
+
+ if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
+ {
+ if (splittable_const_int_operand (cmp_op1, VOIDmode))
+ {
+ HOST_WIDE_INT rhs = INTVAL (cmp_op1), new_rhs;
+ enum rtx_code new_code;
+
+ switch (*code)
+ {
+ case LTU: new_rhs = rhs - 1; new_code = LEU; goto try_new_rhs;
+ case LEU: new_rhs = rhs + 1; new_code = LTU; goto try_new_rhs;
+ case GTU: new_rhs = rhs + 1; new_code = GEU; goto try_new_rhs;
+ case GEU: new_rhs = rhs - 1; new_code = GTU; goto try_new_rhs;
+ case LT: new_rhs = rhs - 1; new_code = LE; goto try_new_rhs;
+ case LE: new_rhs = rhs + 1; new_code = LT; goto try_new_rhs;
+ case GT: new_rhs = rhs + 1; new_code = GE; goto try_new_rhs;
+ case GE: new_rhs = rhs - 1; new_code = GT;
+ try_new_rhs:
+ /* Convert e.g. OP0 > 4095 into OP0 >= 4096. */
+ if ((rhs < 0) == (new_rhs < 0)
+ && riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs))
+ {
+ *op1 = GEN_INT (new_rhs);
+ *code = new_code;
+ }
+ break;
+
+ case EQ:
+ case NE:
+ /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */
+ if (SMALL_OPERAND (-rhs))
+ {
+ *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
+ riscv_emit_binary (PLUS, *op0, cmp_op0, GEN_INT (-rhs));
+ *op1 = const0_rtx;
+ }
+ default:
+ break;
+ }
+ }
+
+ if (*op1 != const0_rtx)
+ *op1 = force_reg (GET_MODE (cmp_op0), *op1);
+ }
+ else
+ {
+ /* For FP comparisons, set an integer register with the result of the
+ comparison, then branch on it. */
+ rtx tmp0, tmp1, final_op;
+ enum rtx_code fp_code = *code;
+ *code = riscv_reversed_fp_cond (&fp_code) ? EQ : NE;
+
+ switch (fp_code)
+ {
+ case ORDERED:
+ /* a == a && b == b */
+ tmp0 = gen_reg_rtx (SImode);
+ riscv_emit_binary (EQ, tmp0, cmp_op0, cmp_op0);
+ tmp1 = gen_reg_rtx (SImode);
+ riscv_emit_binary (EQ, tmp1, cmp_op1, cmp_op1);
+ final_op = gen_reg_rtx (SImode);
+ riscv_emit_binary (AND, final_op, tmp0, tmp1);
+ break;
+
+ case LTGT:
+ /* a < b || a > b */
+ tmp0 = gen_reg_rtx (SImode);
+ riscv_emit_binary (LT, tmp0, cmp_op0, cmp_op1);
+ tmp1 = gen_reg_rtx (SImode);
+ riscv_emit_binary (GT, tmp1, cmp_op0, cmp_op1);
+ final_op = gen_reg_rtx (SImode);
+ riscv_emit_binary (IOR, final_op, tmp0, tmp1);
+ break;
+
+ case EQ:
+ case LE:
+ case LT:
+ case GE:
+ case GT:
+ /* We have instructions for these cases. */
+ final_op = gen_reg_rtx (SImode);
+ riscv_emit_binary (fp_code, final_op, cmp_op0, cmp_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Compare the binary result against 0. */
+ *op0 = final_op;
+ *op1 = const0_rtx;
+ }
+}
+
+/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
+ and OPERAND[3]. Store the result in OPERANDS[0].
+
+ On 64-bit targets, the mode of the comparison and target will always be
+ SImode, thus possibly narrower than that of the comparison's operands. */
+
+void
+riscv_expand_scc (rtx operands[])
+{
+ rtx target = operands[0];
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx op0 = operands[2];
+ rtx op1 = operands[3];
+
+ gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
+
+ if (code == EQ || code == NE)
+ {
+ rtx zie = riscv_zero_if_equal (op0, op1);
+ riscv_emit_binary (code, target, zie, const0_rtx);
+ }
+ else
+ riscv_emit_int_order_test (code, 0, target, op0, op1);
+}
+
+/* Compare OPERANDS[1] with OPERANDS[2] using comparison code
+ CODE and jump to OPERANDS[3] if the condition holds. */
+
+void
+riscv_expand_conditional_branch (rtx *operands)
+{
+ enum rtx_code code = GET_CODE (operands[0]);
+ rtx op0 = operands[1];
+ rtx op1 = operands[2];
+ rtx condition;
+
+ riscv_emit_compare (&code, &op0, &op1);
+ condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
+ emit_jump_insn (gen_condjump (condition, operands[3]));
+}
+
+/* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
+ least PARM_BOUNDARY bits of alignment, but will be given anything up
+ to STACK_BOUNDARY bits if the type requires it. */
+
+static unsigned int
+riscv_function_arg_boundary (enum machine_mode mode, const_tree type)
+{
+ unsigned int alignment;
+
+ alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
+ if (alignment < PARM_BOUNDARY)
+ alignment = PARM_BOUNDARY;
+ if (alignment > STACK_BOUNDARY)
+ alignment = STACK_BOUNDARY;
+ return alignment;
+}
+
+/* Fill INFO with information about a single argument. CUM is the
+   cumulative state for earlier arguments. MODE is the mode of this
+   argument and TYPE is its type (if known). NAMED is true if this
+   is a named (fixed) argument rather than a variable one. */
+
+static void
+riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
+                    enum machine_mode mode, const_tree type, bool named)
+{
+  bool doubleword_aligned_p;
+  unsigned int num_bytes, num_words, max_regs;
+
+  /* Work out the size of the argument. */
+  num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+  num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+  /* Scalar, complex and vector floating-point types are passed in
+     floating-point registers, as long as this is a named rather
+     than a variable argument. */
+  info->fpr_p = (named
+                 && (type == 0 || FLOAT_TYPE_P (type))
+                 && (GET_MODE_CLASS (mode) == MODE_FLOAT
+                     || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+                     || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+                 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
+
+  /* Complex floats should only go into FPRs if there are two FPRs free,
+     otherwise they should be passed in the same way as a struct
+     containing two floats. */
+  if (info->fpr_p
+      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+      && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
+    {
+      /* NOTE(review): this reads the GPR counter although the comment
+         talks about FPRs; in this port one counter is shared by both
+         register files (see riscv_arg_regno) -- confirm intended.  */
+      if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
+        info->fpr_p = false;
+      else
+        num_words = 2;
+    }
+
+  /* See whether the argument has doubleword alignment. */
+  doubleword_aligned_p = (riscv_function_arg_boundary (mode, type)
+                          > BITS_PER_WORD);
+
+  /* Set REG_OFFSET to the register count we're interested in.
+     The EABI allocates the floating-point registers separately,
+     but the other ABIs allocate them like integer registers.
+     (Comment inherited from the MIPS port; here a single GPR counter
+     is used for all arguments.)  */
+  info->reg_offset = cum->num_gprs;
+
+  /* Advance to an even register if the argument is doubleword-aligned. */
+  if (doubleword_aligned_p)
+    info->reg_offset += info->reg_offset & 1;
+
+  /* Work out the offset of a stack argument. */
+  info->stack_offset = cum->stack_words;
+  if (doubleword_aligned_p)
+    info->stack_offset += info->stack_offset & 1;
+
+  /* MAX_REGS is the number of argument registers still available.  */
+  max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
+
+  /* Partition the argument between registers and stack. */
+  info->reg_words = MIN (num_words, max_regs);
+  info->stack_words = num_words - info->reg_words;
+}
+
+/* INFO describes a register argument that has the normal format for the
+   argument's mode.  Return the register it uses, assuming that FPRs are
+   available if HARD_FLOAT_P.  */
+
+static unsigned int
+riscv_arg_regno (const struct riscv_arg_info *info, bool hard_float_p)
+{
+  /* FP-eligible arguments land in the FPRs only when FPRs exist;
+     everything else (including FP values on soft-float targets) uses
+     the integer argument registers.  */
+  unsigned int base
+    = (info->fpr_p && hard_float_p) ? FP_ARG_FIRST : GP_ARG_FIRST;
+
+  return base + info->reg_offset;
+}
+
+/* Implement TARGET_FUNCTION_ARG.  Return the rtx holding the (partial)
+   register location for an argument of mode MODE and type TYPE, or null
+   if the whole argument lives on the stack.  */
+
+static rtx
+riscv_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
+                    const_tree type, bool named)
+{
+  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+  struct riscv_arg_info info;
+
+  /* NOTE(review): VOIDmode is presumably the hook's end-of-arguments
+     marker call -- nothing to return for it.  */
+  if (mode == VOIDmode)
+    return NULL;
+
+  riscv_get_arg_info (&info, cum, mode, type, named);
+
+  /* Return straight away if the whole argument is passed on the stack. */
+  if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
+    return NULL;
+
+  /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
+     contains a double in its entirety, then that 64-bit chunk is passed
+     in a floating-point register.  (Rule and comment inherited from the
+     MIPS port.)  */
+  if (TARGET_HARD_FLOAT
+      && named
+      && type != 0
+      && TREE_CODE (type) == RECORD_TYPE
+      && TYPE_SIZE_UNIT (type)
+      && host_integerp (TYPE_SIZE_UNIT (type), 1))
+      /* tree_fits_uhwi_p is the newer-GCC spelling of the check above,
+         apparently kept for a future API migration.  */
+      //&& tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
+    {
+      tree field;
+
+      /* First check to see if there is any such field. */
+      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
+        if (TREE_CODE (field) == FIELD_DECL
+            && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
+            && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
+            && host_integerp (bit_position (field), 0)
+            //&& tree_fits_shwi_p (bit_position (field))
+            && int_bit_position (field) % BITS_PER_WORD == 0)
+          break;
+
+      if (field != 0)
+        {
+          /* Now handle the special case by returning a PARALLEL
+             indicating where each 64-bit chunk goes. INFO.REG_WORDS
+             chunks are passed in registers. */
+          unsigned int i;
+          HOST_WIDE_INT bitpos;
+          rtx ret;
+
+          /* assign_parms checks the mode of ENTRY_PARM, so we must
+             use the actual mode here. */
+          ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
+
+          bitpos = 0;
+          field = TYPE_FIELDS (type);
+          for (i = 0; i < info.reg_words; i++)
+            {
+              rtx reg;
+
+              /* Advance to the first field at or beyond this chunk.  */
+              for (; field; field = DECL_CHAIN (field))
+                if (TREE_CODE (field) == FIELD_DECL
+                    && int_bit_position (field) >= bitpos)
+                  break;
+
+              /* A word-aligned, word-sized float field gets an FPR;
+                 every other chunk gets a GPR.  */
+              if (field
+                  && int_bit_position (field) == bitpos
+                  && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
+                  && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
+                reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
+              else
+                reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
+
+              XVECEXP (ret, 0, i)
+                = gen_rtx_EXPR_LIST (VOIDmode, reg,
+                                     GEN_INT (bitpos / BITS_PER_UNIT));
+
+              bitpos += BITS_PER_WORD;
+            }
+          return ret;
+        }
+    }
+
+  /* Handle the n32/n64 conventions for passing complex floating-point
+     arguments in FPR pairs. The real part goes in the lower register
+     and the imaginary part goes in the upper register.  (Comment
+     inherited from the MIPS port.)  */
+  if (info.fpr_p
+      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+    {
+      rtx real, imag;
+      enum machine_mode inner;
+      unsigned int regno;
+
+      inner = GET_MODE_INNER (mode);
+      regno = FP_ARG_FIRST + info.reg_offset;
+      if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
+        {
+          /* Real part in registers, imaginary part on stack. */
+          gcc_assert (info.stack_words == info.reg_words);
+          return gen_rtx_REG (inner, regno);
+        }
+      else
+        {
+          gcc_assert (info.stack_words == 0);
+          real = gen_rtx_EXPR_LIST (VOIDmode,
+                                    gen_rtx_REG (inner, regno),
+                                    const0_rtx);
+          imag = gen_rtx_EXPR_LIST (VOIDmode,
+                                    gen_rtx_REG (inner,
+                                                 regno + info.reg_words / 2),
+                                    GEN_INT (GET_MODE_SIZE (inner)));
+          return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
+        }
+    }
+
+  return gen_rtx_REG (mode, riscv_arg_regno (&info, TARGET_HARD_FLOAT));
+}
+
+/* Implement TARGET_FUNCTION_ARG_ADVANCE.  Update CUM to account for an
+   argument of mode MODE and type TYPE having been processed.  */
+
+static void
+riscv_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
+                            const_tree type, bool named)
+{
+  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+  struct riscv_arg_info info;
+
+  riscv_get_arg_info (&info, cum, mode, type, named);
+
+  /* Advance the register count. This has the effect of setting
+     num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
+     argument required us to skip the final GPR and pass the whole
+     argument on the stack. */
+  cum->num_gprs = info.reg_offset + info.reg_words;
+
+  /* Advance the stack word count. */
+  if (info.stack_words > 0)
+    cum->stack_words = info.stack_offset + info.stack_words;
+}
+
+/* Implement TARGET_ARG_PARTIAL_BYTES.  */
+
+static int
+riscv_arg_partial_bytes (cumulative_args_t cum,
+                         enum machine_mode mode, tree type, bool named)
+{
+  struct riscv_arg_info info;
+
+  riscv_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
+
+  /* The argument is "partial" only when it is split between registers
+     and the stack; in that case report the register portion's size.  */
+  if (info.stack_words == 0)
+    return 0;
+  return info.reg_words * UNITS_PER_WORD;
+}
+
+/* See whether VALTYPE is a record whose fields should be returned in
+   floating-point registers. If so, return the number of fields and
+   list them in FIELDS (which should have two elements). Return 0
+   otherwise.
+
+   For n32 & n64, a structure with one or two fields is returned in
+   floating-point registers as long as every field has a floating-point
+   type.  (Comment inherited from the MIPS port.)  */
+
+static int
+riscv_fpr_return_fields (const_tree valtype, tree *fields)
+{
+  tree field;
+  int i;
+
+  if (TREE_CODE (valtype) != RECORD_TYPE)
+    return 0;
+
+  i = 0;
+  for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
+    {
+      /* Skip non-field entries such as type and method decls.  */
+      if (TREE_CODE (field) != FIELD_DECL)
+        continue;
+
+      /* Any non-FP field disqualifies the whole record.  */
+      if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
+        return 0;
+
+      /* A third field also disqualifies it (check before storing so
+         FIELDS[] is never overrun).  */
+      if (i == 2)
+        return 0;
+
+      fields[i++] = field;
+    }
+  return i;
+}
+
+/* Return true if a function result in mode MODE would be returned in a
+   floating-point register.  */
+
+static bool
+riscv_return_mode_in_fpr_p (enum machine_mode mode)
+{
+  /* Only scalar, vector and complex FP modes qualify, and only when
+     each unit fits in a hardware FP register.  */
+  switch (GET_MODE_CLASS (mode))
+    {
+    case MODE_FLOAT:
+    case MODE_VECTOR_FLOAT:
+    case MODE_COMPLEX_FLOAT:
+      return GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE;
+
+    default:
+      return false;
+    }
+}
+
+/* Return the representation of an FPR return register when the
+   value being returned in FP_RETURN has mode VALUE_MODE and the
+   return type itself has mode TYPE_MODE. On NewABI targets,
+   the two modes may be different for structures like:
+
+       struct __attribute__((packed)) foo { float f; }
+
+   where we return the SFmode value of "f" in FP_RETURN, but where
+   the structure itself has mode BLKmode.  ("NewABI" wording inherited
+   from the MIPS port.)  */
+
+static rtx
+riscv_return_fpr_single (enum machine_mode type_mode,
+                         enum machine_mode value_mode)
+{
+  rtx x;
+
+  x = gen_rtx_REG (value_mode, FP_RETURN);
+  /* When the modes differ, wrap the register in a one-element PARALLEL
+     so the value keeps the type's mode while living in FP_RETURN.  */
+  if (type_mode != value_mode)
+    {
+      x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
+      x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
+    }
+  return x;
+}
+
+/* Return a composite value in a pair of floating-point registers.
+   MODE1 and OFFSET1 are the mode and byte offset for the first value,
+   likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
+   complete value.
+
+   For n32 & n64, $f0 always holds the first value and $f2 the second.
+   Otherwise the values are packed together as closely as possible.
+   (Comment inherited from the MIPS port; the code below actually uses
+   FP_RETURN and FP_RETURN + 1.)  */
+
+static rtx
+riscv_return_fpr_pair (enum machine_mode mode,
+                       enum machine_mode mode1, HOST_WIDE_INT offset1,
+                       enum machine_mode mode2, HOST_WIDE_INT offset2)
+{
+  return gen_rtx_PARALLEL
+    (mode,
+     gen_rtvec (2,
+                gen_rtx_EXPR_LIST (VOIDmode,
+                                   gen_rtx_REG (mode1, FP_RETURN),
+                                   GEN_INT (offset1)),
+                gen_rtx_EXPR_LIST (VOIDmode,
+                                   gen_rtx_REG (mode2, FP_RETURN + 1),
+                                   GEN_INT (offset2))));
+
+}
+
+/* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
+   VALTYPE is the return type and MODE is VOIDmode. For libcalls,
+   VALTYPE is null and MODE is the mode of the return value. */
+
+rtx
+riscv_function_value (const_tree valtype, const_tree func, enum machine_mode mode)
+{
+  if (valtype)
+    {
+      tree fields[2];
+      int unsigned_p;
+
+      mode = TYPE_MODE (valtype);
+      unsigned_p = TYPE_UNSIGNED (valtype);
+
+      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
+         return values, promote the mode here too. */
+      mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
+
+      /* Handle structures whose fields are returned in $f0/$f2
+         (MIPS-era naming; see riscv_return_fpr_pair). */
+      switch (riscv_fpr_return_fields (valtype, fields))
+        {
+        case 1:
+          return riscv_return_fpr_single (mode,
+                                          TYPE_MODE (TREE_TYPE (fields[0])));
+
+        case 2:
+          return riscv_return_fpr_pair (mode,
+                                        TYPE_MODE (TREE_TYPE (fields[0])),
+                                        int_byte_position (fields[0]),
+                                        TYPE_MODE (TREE_TYPE (fields[1])),
+                                        int_byte_position (fields[1]));
+        }
+
+      /* Only use FPRs for scalar, complex or vector types. */
+      if (!FLOAT_TYPE_P (valtype))
+        return gen_rtx_REG (mode, GP_RETURN);
+    }
+
+  /* Handle long doubles for n32 & n64.  (Comment inherited from the
+     MIPS port; TFmode values are split across two word-sized halves.) */
+  if (mode == TFmode)
+    return riscv_return_fpr_pair (mode,
+                                  DImode, 0,
+                                  DImode, GET_MODE_SIZE (mode) / 2);
+
+  if (riscv_return_mode_in_fpr_p (mode))
+    {
+      if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+        return riscv_return_fpr_pair (mode,
+                                      GET_MODE_INNER (mode), 0,
+                                      GET_MODE_INNER (mode),
+                                      GET_MODE_SIZE (mode) / 2);
+      else
+        return gen_rtx_REG (mode, FP_RETURN);
+    }
+
+  return gen_rtx_REG (mode, GP_RETURN);
+}
+
+/* Implement TARGET_RETURN_IN_MEMORY.  Scalars and aggregates no larger
+   than two machine words are returned in a0/a1; everything else is
+   returned in memory.  */
+
+static bool
+riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
+{
+  HOST_WIDE_INT size = int_size_in_bytes (type);
+
+  /* A negative size means the type is variable-sized, which also forces
+     a memory return.  */
+  return size < 0 || size > 2 * UNITS_PER_WORD;
+}
+
+/* Implement TARGET_PASS_BY_REFERENCE.  */
+
+static bool
+riscv_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
+                         enum machine_mode mode, const_tree type,
+                         bool named ATTRIBUTE_UNUSED)
+{
+  /* Anything that would be returned in memory is also passed by
+     reference, as is anything the middle end says must live on the
+     stack.  */
+  return (type && riscv_return_in_memory (type, NULL_TREE))
+         || targetm.calls.must_pass_in_stack (mode, type);
+}
+
+/* Implement TARGET_SETUP_INCOMING_VARARGS.  Spill the anonymous-argument
+   registers of a varargs function to the stack so va_arg can find them.
+   If NO_RTL is nonzero, only compute sizes; emit no code.  */
+
+static void
+riscv_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
+                              tree type, int *pretend_size ATTRIBUTE_UNUSED,
+                              int no_rtl)
+{
+  CUMULATIVE_ARGS local_cum;
+  int gp_saved;
+
+  /* The caller has advanced CUM up to, but not beyond, the last named
+     argument. Advance a local copy of CUM past the last "real" named
+     argument, to find out how many registers are left over. */
+  local_cum = *get_cumulative_args (cum);
+  riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
+
+  /* Find out how many registers we need to save. */
+  gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
+
+  if (!no_rtl && gp_saved > 0)
+    {
+      rtx ptr, mem;
+
+      /* Save area sits just below the incoming arguments.  */
+      ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
+                           REG_PARM_STACK_SPACE (cfun->decl)
+                           - gp_saved * UNITS_PER_WORD);
+      mem = gen_frame_mem (BLKmode, ptr);
+      set_mem_alias_set (mem, get_varargs_alias_set ());
+
+      move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
+                           mem, gp_saved);
+    }
+  if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
+    cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
+}
+
+/* Implement TARGET_EXPAND_BUILTIN_VA_START.  */
+
+static void
+riscv_va_start (tree valist, rtx nextarg)
+{
+  /* Step back over the register save area set up by
+     riscv_setup_incoming_varargs so va_arg starts at the first
+     anonymous argument.  */
+  nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
+  std_expand_builtin_va_start (valist, nextarg);
+}
+
+/* Expand a call.  SIBCALL_P says whether it is a sibling call.  RESULT
+   is where the result will go (null for "call"s and "sibcall"s), ADDR
+   is the address of the function and ARGS_SIZE is the size of the
+   arguments.  Return the call itself.  */
+
+rtx
+riscv_expand_call (bool sibcall_p, rtx result, rtx addr, rtx args_size)
+{
+  rtx pattern;
+
+  /* Force an address that isn't directly callable into a register.  */
+  if (!call_insn_operand (addr, VOIDmode))
+    {
+      rtx reg = RISCV_EPILOGUE_TEMP (Pmode);
+      riscv_emit_move (reg, addr);
+      addr = reg;
+    }
+
+  if (result == 0)
+    {
+      /* Call with no return value.  */
+      rtx (*fn) (rtx, rtx);
+
+      if (sibcall_p)
+        fn = gen_sibcall_internal;
+      else
+        fn = gen_call_internal;
+
+      pattern = fn (addr, args_size);
+    }
+  else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
+    {
+      /* Handle return values created by riscv_return_fpr_pair. */
+      rtx (*fn) (rtx, rtx, rtx, rtx);
+      rtx reg1, reg2;
+
+      if (sibcall_p)
+        fn = gen_sibcall_value_multiple_internal;
+      else
+        fn = gen_call_value_multiple_internal;
+
+      reg1 = XEXP (XVECEXP (result, 0, 0), 0);
+      reg2 = XEXP (XVECEXP (result, 0, 1), 0);
+      pattern = fn (reg1, addr, args_size, reg2);
+    }
+  else
+    {
+      rtx (*fn) (rtx, rtx, rtx);
+
+      if (sibcall_p)
+        fn = gen_sibcall_value_internal;
+      else
+        fn = gen_call_value_internal;
+
+      /* Handle return values created by riscv_return_fpr_single. */
+      if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
+        result = XEXP (XVECEXP (result, 0, 0), 0);
+      pattern = fn (result, addr, args_size);
+    }
+
+  return emit_call_insn (pattern);
+}
+
+/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
+   Assume that the areas do not overlap.  */
+
+static void
+riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
+{
+  HOST_WIDE_INT offset, delta;
+  unsigned HOST_WIDE_INT bits;
+  int i;
+  enum machine_mode mode;
+  rtx *regs;
+
+  /* Move in the widest unit both SRC and DEST are known to be aligned
+     to, capped at the word size (and at least one byte).  */
+  bits = MAX (BITS_PER_UNIT,
+              MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
+
+  mode = mode_for_size (bits, MODE_INT, 0);
+  delta = bits / BITS_PER_UNIT;
+
+  /* Allocate a buffer for the temporary registers. */
+  regs = XALLOCAVEC (rtx, length / delta);
+
+  /* Load as many BITS-sized chunks as possible into registers first;
+     all loads are emitted before any store.  */
+  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+    {
+      regs[i] = gen_reg_rtx (mode);
+      riscv_emit_move (regs[i], adjust_address (src, mode, offset));
+    }
+
+  /* Copy the chunks to the destination. */
+  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+    riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
+
+  /* Mop up any left-over bytes. */
+  if (offset < length)
+    {
+      src = adjust_address (src, BLKmode, offset);
+      dest = adjust_address (dest, BLKmode, offset);
+      move_by_pieces (dest, src, length - offset,
+                      MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
+    }
+}
+
+/* Helper function for doing a loop-based block operation on memory
+   reference MEM. Each iteration of the loop will operate on LENGTH
+   bytes of MEM.
+
+   Create a new base register for use within the loop and point it to
+   the start of MEM. Create a new memory reference that uses this
+   register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
+
+static void
+riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
+                        rtx *loop_reg, rtx *loop_mem)
+{
+  *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
+
+  /* Although the new mem does not refer to a known location,
+     it does keep up to LENGTH bytes of alignment. */
+  *loop_mem = change_address (mem, BLKmode, *loop_reg);
+  set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
+}
+
+/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
+   bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
+   the memory regions do not overlap. */
+
+static void
+riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
+                       HOST_WIDE_INT bytes_per_iter)
+{
+  rtx label, src_reg, dest_reg, final_src, test;
+  HOST_WIDE_INT leftover;
+
+  /* The loop handles only whole iterations; the remainder is copied
+     straight-line after it.  */
+  leftover = length % bytes_per_iter;
+  length -= leftover;
+
+  /* Create registers and memory references for use within the loop. */
+  riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
+  riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
+
+  /* Calculate the value that SRC_REG should have after the last iteration
+     of the loop. */
+  final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
+                                   0, 0, OPTAB_WIDEN);
+
+  /* Emit the start of the loop. */
+  label = gen_label_rtx ();
+  emit_label (label);
+
+  /* Emit the loop body. */
+  riscv_block_move_straight (dest, src, bytes_per_iter);
+
+  /* Move on to the next block. */
+  riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
+  riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
+
+  /* Emit the loop condition: branch back while SRC_REG has not reached
+     FINAL_SRC.  */
+  test = gen_rtx_NE (VOIDmode, src_reg, final_src);
+  if (Pmode == DImode)
+    emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
+  else
+    emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
+
+  /* Mop up any left-over bytes. */
+  if (leftover)
+    riscv_block_move_straight (dest, src, leftover);
+}
+
+/* Expand a movmemsi instruction, which copies LENGTH bytes from
+   memory reference SRC to memory reference DEST.  Return true on
+   success, false to fall back to a library call.  */
+
+bool
+riscv_expand_block_move (rtx dest, rtx src, rtx length)
+{
+  /* Only constant lengths are expanded inline.  */
+  if (CONST_INT_P (length))
+    {
+      HOST_WIDE_INT factor, align;
+
+      /* FACTOR scales the byte thresholds down when alignment forces
+         sub-word accesses.  */
+      align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
+      factor = BITS_PER_WORD / align;
+
+      if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
+        {
+          riscv_block_move_straight (dest, src, INTVAL (length));
+          return true;
+        }
+      else if (optimize && align >= BITS_PER_WORD)
+        {
+          riscv_block_move_loop (dest, src, INTVAL (length),
+                                 RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / factor);
+          return true;
+        }
+    }
+  return false;
+}
+
+/* (Re-)Initialize riscv_lo_relocs and riscv_hi_relocs, the tables of
+   "%hi("/"%lo("-style relocation prefixes indexed by symbol type.  An
+   entry left null means the corresponding access style is unusable.  */
+
+static void
+riscv_init_relocs (void)
+{
+  memset (riscv_hi_relocs, '\0', sizeof (riscv_hi_relocs));
+  memset (riscv_lo_relocs, '\0', sizeof (riscv_lo_relocs));
+
+  /* Absolute %hi/%lo addressing only works for non-PIC medlow code.  */
+  if (!flag_pic && riscv_cmodel == CM_MEDLOW)
+    {
+      riscv_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
+      riscv_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
+    }
+
+  /* TLS local-exec relocations are usable in executables (including
+     PIEs).  */
+  if (!flag_pic || flag_pie)
+    {
+      riscv_hi_relocs[SYMBOL_TLS_LE] = "%tprel_hi(";
+      riscv_lo_relocs[SYMBOL_TLS_LE] = "%tprel_lo(";
+    }
+}
+
+/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
+   in context CONTEXT. RELOCS is the array of relocations to use. */
+
+static void
+riscv_print_operand_reloc (FILE *file, rtx op, const char **relocs)
+{
+  enum riscv_symbol_type symbol_type;
+  const char *p;
+
+  symbol_type = riscv_classify_symbolic_expression (op);
+  /* A null entry means this symbol type has no relocation operator in
+     the current configuration (see riscv_init_relocs).  */
+  gcc_assert (relocs[symbol_type]);
+
+  fputs (relocs[symbol_type], file);
+  output_addr_const (file, riscv_strip_unspec_address (op));
+  /* Emit one ')' for every '(' the relocation prefix opened.  */
+  for (p = relocs[symbol_type]; *p != 0; p++)
+    if (*p == '(')
+      fputc (')', file);
+}
+
+/* Return the assembler suffix (".aq", ".rl", ".sc" or "") that
+   implements memory model MODEL for an atomic memory operation.  */
+
+static const char *
+riscv_memory_model_suffix (enum memmodel model)
+{
+  switch (model)
+    {
+    case MEMMODEL_ACQ_REL:
+    case MEMMODEL_SEQ_CST:
+      return ".sc";
+    case MEMMODEL_ACQUIRE:
+    case MEMMODEL_CONSUME:
+      return ".aq";
+    case MEMMODEL_RELEASE:
+      return ".rl";
+    case MEMMODEL_RELAXED:
+      return "";
+    default:
+      gcc_unreachable ();
+    }
+}
+
+/* Implement TARGET_PRINT_OPERAND. The RISCV-specific operand codes are:
+
+   'h'	Print the high-part relocation associated with OP, after stripping
+	  any outermost HIGH.
+   'R'	Print the low-part relocation associated with OP.
+   'C'	Print the integer branch condition for comparison OP.
+   'A'	Print the atomic operation suffix for memory model OP.
+   'y'	Print the base register of memory operand OP.
+   'z'	Print $0 if OP is zero, otherwise print OP normally. */
+
+static void
+riscv_print_operand (FILE *file, rtx op, int letter)
+{
+  enum rtx_code code;
+
+  gcc_assert (op);
+  code = GET_CODE (op);
+
+  switch (letter)
+    {
+    case 'h':
+      if (code == HIGH)
+        op = XEXP (op, 0);
+      riscv_print_operand_reloc (file, op, riscv_hi_relocs);
+      break;
+
+    case 'R':
+      riscv_print_operand_reloc (file, op, riscv_lo_relocs);
+      break;
+
+    case 'C':
+      /* The RTL names match the instruction names. */
+      fputs (GET_RTX_NAME (code), file);
+      break;
+
+    case 'A':
+      fputs (riscv_memory_model_suffix ((enum memmodel)INTVAL (op)), file);
+      break;
+
+    default:
+      switch (code)
+        {
+        case REG:
+          if (letter && letter != 'z')
+            output_operand_lossage ("invalid use of '%%%c'", letter);
+          fprintf (file, "%s", reg_names[REGNO (op)]);
+          break;
+
+        case MEM:
+          /* 'y' prints only the base register of the address.  */
+          if (letter == 'y')
+            fprintf (file, "%s", reg_names[REGNO(XEXP(op, 0))]);
+          else if (letter && letter != 'z')
+            output_operand_lossage ("invalid use of '%%%c'", letter);
+          else
+            output_address (XEXP (op, 0));
+          break;
+
+        default:
+          /* 'z' substitutes the hard-zero register for a zero constant. */
+          if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
+            fputs (reg_names[GP_REG_FIRST], file);
+          else if (letter && letter != 'z')
+            output_operand_lossage ("invalid use of '%%%c'", letter);
+          else
+            output_addr_const (file, riscv_strip_unspec_address (op));
+          break;
+        }
+    }
+}
+
+/* Implement TARGET_PRINT_OPERAND_ADDRESS.  Print memory address X in
+   the assembler's "offset(base)" form.  */
+
+static void
+riscv_print_operand_address (FILE *file, rtx x)
+{
+  struct riscv_address_info addr;
+
+  if (riscv_classify_address (&addr, x, word_mode, true))
+    switch (addr.type)
+      {
+      case ADDRESS_REG:
+        /* Constant offset, then "(basereg)".  */
+        riscv_print_operand (file, addr.offset, 0);
+        fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
+        return;
+
+      case ADDRESS_LO_SUM:
+        /* "%lo(sym)(basereg)" for the low part of a split address.  */
+        riscv_print_operand_reloc (file, addr.offset, riscv_lo_relocs);
+        fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
+        return;
+
+      case ADDRESS_CONST_INT:
+        /* An absolute small constant, based off the zero register.  */
+        output_addr_const (file, x);
+        fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
+        return;
+
+      case ADDRESS_SYMBOLIC:
+        output_addr_const (file, riscv_strip_unspec_address (x));
+        return;
+      }
+  gcc_unreachable ();
+}
+
+/* Return true if an object of SIZE bytes is eligible for the small data
+   area: -G must be in effect and SIZE within its limit.  */
+
+static bool
+riscv_size_ok_for_small_data_p (int size)
+{
+  if (g_switch_value == 0)
+    return false;
+  return IN_RANGE (size, 1, g_switch_value);
+}
+
+/* Return true if EXP should be placed in the small data section. */
+
+static bool
+riscv_in_small_data_p (const_tree x)
+{
+  /* Strings and functions never live in small data.  */
+  if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
+    return false;
+
+  /* An explicit section attribute decides directly: only .sdata/.sbss
+     placements count as small data.  */
+  if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
+    {
+      const char *sec = TREE_STRING_POINTER (DECL_SECTION_NAME (x));
+      return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
+    }
+
+  /* Otherwise decide by size against the -G threshold.  */
+  return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
+}
+
+/* Return a section for X, handling small data.  MODE is X's mode and
+   ALIGN its required alignment in bits.  */
+
+static section *
+riscv_elf_select_rtx_section (enum machine_mode mode, rtx x,
+                              unsigned HOST_WIDE_INT align)
+{
+  section *s = default_elf_select_rtx_section (mode, x, align);
+
+  if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
+    {
+      if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
+        {
+          /* Rename .rodata.cst* to .srodata.cst*. */
+          char name[32];
+          sprintf (name, ".s%s", s->named.name + 1);
+          return get_section (name, s->named.common.flags, NULL);
+        }
+
+      /* Plain data constants go to .sdata instead.  */
+      if (s == data_section)
+        return sdata_section;
+    }
+
+  return s;
+}
+
+/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL.  Emit a SIZE-byte
+   DTP-relative reference to X for DWARF TLS debug info.  */
+
+static void ATTRIBUTE_UNUSED
+riscv_output_dwarf_dtprel (FILE *file, int size, rtx x)
+{
+  switch (size)
+    {
+    case 4:
+      fputs ("\t.dtprelword\t", file);
+      break;
+
+    case 8:
+      fputs ("\t.dtpreldword\t", file);
+      break;
+
+    default:
+      gcc_unreachable ();
+    }
+  output_addr_const (file, x);
+  /* NOTE(review): the 0x800 bias presumably matches the sign-extending
+     %lo/%hi split used for TLS offsets -- confirm.  */
+  fputs ("+0x800", file);
+}
+
+/* Make the last emitted instruction frame-related and record that it
+   performs the operation described by FRAME_PATTERN.  */
+
+static void
+riscv_set_frame_expr (rtx frame_pattern)
+{
+  rtx insn = get_last_insn ();
+
+  RTX_FRAME_RELATED_P (insn) = 1;
+  REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+                                      frame_pattern,
+                                      REG_NOTES (insn));
+}
+
+/* Return a frame-related rtx that stores REG at MEM.
+   REG must be a single register.  */
+
+static rtx
+riscv_frame_set (rtx mem, rtx reg)
+{
+  rtx set = gen_rtx_SET (VOIDmode, mem, reg);
+
+  /* Mark the store itself so the CFI machinery records it.  */
+  RTX_FRAME_RELATED_P (set) = 1;
+  return set;
+}
+
+/* Return true if the current function must save register REGNO. */
+
+static bool
+riscv_save_reg_p (unsigned int regno)
+{
+  /* Only callee-saved registers need saving.  */
+  bool call_saved = !global_regs[regno] && !call_really_used_regs[regno];
+  /* ... and only when they may actually be written: either the register
+     is live somewhere, everything must be saved, or it is the frame
+     pointer in a function that needs one.  */
+  bool might_clobber = crtl->saves_all_registers
+                       || df_regs_ever_live_p (regno)
+                       || (regno == HARD_FRAME_POINTER_REGNUM
+                           && frame_pointer_needed);
+
+  /* The return address must additionally be saved for eh_return.  */
+  return (call_saved && might_clobber)
+         || (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return);
+}
+
+/* Populate the current function's riscv_frame_info structure.
+
+   RISC-V stack frames grow downward. High addresses are at the top.
+
+	+-------------------------------+
+	|				|
+	|  incoming stack arguments	|
+	|				|
+	+-------------------------------+ <-- incoming stack pointer
+	|				|
+	|  callee-allocated save area	|
+	|  for arguments that are	|
+	|  split between registers and	|
+	|  the stack			|
+	|				|
+	+-------------------------------+ <-- arg_pointer_rtx
+	|				|
+	|  callee-allocated save area	|
+	|  for register varargs		|
+	|				|
+	+-------------------------------+ <-- hard_frame_pointer_rtx;
+	|				|     stack_pointer_rtx + gp_sp_offset
+	|  GPR save area		|	+ UNITS_PER_WORD
+	|				|
+	+-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
+	|				|	+ UNITS_PER_HWVALUE
+	|  FPR save area		|
+	|				|
+	+-------------------------------+ <-- frame_pointer_rtx (virtual)
+	|				|
+	|  local variables		|
+	|				|
+      P +-------------------------------+
+	|				|
+	|  outgoing stack arguments	|
+	|				|
+	+-------------------------------+ <-- stack_pointer_rtx
+
+   Dynamic stack allocations such as alloca insert data at point P.
+   They decrease stack_pointer_rtx but leave frame_pointer_rtx and
+   hard_frame_pointer_rtx unchanged. */
+
+static void
+riscv_compute_frame_info (void)
+{
+  struct riscv_frame_info *frame;
+  HOST_WIDE_INT offset;
+  unsigned int regno, i;
+
+  frame = &cfun->machine->frame;
+  memset (frame, 0, sizeof (*frame));
+
+  /* Find out which GPRs we need to save. */
+  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
+    if (riscv_save_reg_p (regno))
+      frame->mask |= 1 << (regno - GP_REG_FIRST);
+
+  /* If this function calls eh_return, we must also save and restore the
+     EH data registers. */
+  if (crtl->calls_eh_return)
+    for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
+      frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
+
+  /* Find out which FPRs we need to save. This loop must iterate over
+     the same space as its companion in riscv_for_each_saved_gpr_and_fpr. */
+  if (TARGET_HARD_FLOAT)
+    for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
+      if (riscv_save_reg_p (regno))
+        frame->fmask |= 1 << (regno - FP_REG_FIRST);
+
+  /* Accumulate the frame layout bottom-up, mirroring the diagram above. */
+  /* At the bottom of the frame are any outgoing stack arguments. */
+  offset = crtl->outgoing_args_size;
+  /* Next are local stack variables. */
+  offset += RISCV_STACK_ALIGN (get_frame_size ());
+  /* The virtual frame pointer points above the local variables. */
+  frame->frame_pointer_offset = offset;
+  /* Next are the callee-saved FPRs. */
+  if (frame->fmask)
+    {
+      unsigned num_saved = __builtin_popcount(frame->fmask);
+      offset += RISCV_STACK_ALIGN (num_saved * UNITS_PER_FPREG);
+      frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
+    }
+  /* Next are the callee-saved GPRs. */
+  if (frame->mask)
+    {
+      unsigned num_saved = __builtin_popcount(frame->mask);
+      offset += RISCV_STACK_ALIGN (num_saved * UNITS_PER_WORD);
+      frame->gp_sp_offset = offset - UNITS_PER_WORD;
+    }
+  /* The hard frame pointer points above the callee-saved GPRs. */
+  frame->hard_frame_pointer_offset = offset;
+  /* Above the hard frame pointer is the callee-allocated varargs save area. */
+  offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
+  frame->arg_pointer_offset = offset;
+  /* Next is the callee-allocated area for pretend stack arguments. */
+  offset += crtl->args.pretend_args_size;
+  frame->total_size = offset;
+  /* Next points the incoming stack pointer and any incoming arguments. */
+}
+
+/* Implement TARGET_CAN_ELIMINATE.  Make sure that we're not trying to
+   eliminate to the wrong hard frame pointer.  */
+
+static bool
+riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+{
+  /* Eliminations may target only the stack pointer or the hard frame
+     pointer.  */
+  if (to == HARD_FRAME_POINTER_REGNUM)
+    return true;
+  return to == STACK_POINTER_REGNUM;
+}
+
+/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
+   or argument pointer. TO is either the stack pointer or hard frame
+   pointer. */
+
+HOST_WIDE_INT
+riscv_initial_elimination_offset (int from, int to)
+{
+  HOST_WIDE_INT src, dest;
+
+  riscv_compute_frame_info ();
+
+  /* All frame offsets are measured from the stack pointer.  */
+  if (to == HARD_FRAME_POINTER_REGNUM)
+    dest = cfun->machine->frame.hard_frame_pointer_offset;
+  else if (to == STACK_POINTER_REGNUM)
+    dest = 0; /* this is the base of all offsets */
+  else
+    gcc_unreachable ();
+
+  if (from == FRAME_POINTER_REGNUM)
+    src = cfun->machine->frame.frame_pointer_offset;
+  else if (from == ARG_POINTER_REGNUM)
+    src = cfun->machine->frame.arg_pointer_offset;
+  else
+    gcc_unreachable ();
+
+  return src - dest;
+}
+
+/* Implement RETURN_ADDR_RTX.  We do not support moving back to a
+   previous frame.  */
+
+rtx
+riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
+{
+  /* Only the current frame's return address is available; outer frames
+     yield a constant zero.  */
+  if (count == 0)
+    return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
+
+  return const0_rtx;
+}
+
+/* Emit code to change the current function's return address to
+   ADDRESS. SCRATCH is available as a scratch register, if needed.
+   ADDRESS and SCRATCH are both word-mode GPRs. */
+
+void
+riscv_set_return_address (rtx address, rtx scratch)
+{
+  rtx slot_address;
+
+  /* The return address must already have a save slot in the GPR area.  */
+  gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
+  slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
+                                   cfun->machine->frame.gp_sp_offset);
+  riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
+}
+
+/* A function to save or store a register. The first argument is the
+   register and the second is the stack slot. */
+typedef void (*riscv_save_restore_fn) (rtx, rtx);
+
+/* Use FN to save or restore register REGNO. MODE is the register's
+   mode and OFFSET is the offset of its save slot from the current
+   stack pointer. */
+
+static void
+riscv_save_restore_reg (enum machine_mode mode, int regno,
+                        HOST_WIDE_INT offset, riscv_save_restore_fn fn)
+{
+  rtx mem;
+
+  /* Build the stack-slot memory reference and let FN decide the
+     direction of the move.  */
+  mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
+  fn (gen_rtx_REG (mode, regno), mem);
+}
+
+/* Call FN for each register that is saved by the current function.
+   SP_OFFSET is the offset of the current stack pointer from the start
+   of the frame. */
+
+static void
+riscv_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
+                                  riscv_save_restore_fn fn)
+{
+  HOST_WIDE_INT offset;
+  int regno;
+
+  /* Save the link register and s-registers. */
+  offset = cfun->machine->frame.gp_sp_offset - sp_offset;
+  /* NOTE(review): the bound is GP_REG_LAST-1, so the final GPR is never
+     visited here -- confirm this matches how frame->mask is built.  */
+  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
+    if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
+      {
+        riscv_save_restore_reg (word_mode, regno, offset, fn);
+        offset -= UNITS_PER_WORD;
+      }
+
+  /* This loop must iterate over the same space as its companion in
+     riscv_compute_frame_info. */
+  offset = cfun->machine->frame.fp_sp_offset - sp_offset;
+  for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
+    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
+      {
+        riscv_save_restore_reg (DFmode, regno, offset, fn);
+        offset -= GET_MODE_SIZE (DFmode);
+      }
+}
+
+/* Emit a move from SRC to DEST, given that one of them is a register
+   save slot and that the other is a register. TEMP is a temporary
+   GPR of the same mode that is available if need be. */
+
+static void
+riscv_emit_save_slot_move (rtx dest, rtx src, rtx temp)
+{
+  unsigned int regno;
+  rtx mem;
+  enum reg_class rclass;
+
+  /* Identify which operand is the register and which the slot.  */
+  if (REG_P (src))
+    {
+      regno = REGNO (src);
+      mem = dest;
+    }
+  else
+    {
+      regno = REGNO (dest);
+      mem = src;
+    }
+
+  /* Ask the backend whether the move needs to bounce through an
+     intermediate register class.  */
+  rclass = riscv_secondary_reload_class (REGNO_REG_CLASS (regno),
+                                         GET_MODE (mem), mem, mem == src);
+
+  if (rclass == NO_REGS)
+    riscv_emit_move (dest, src);
+  else
+    {
+      gcc_assert (!reg_overlap_mentioned_p (dest, temp));
+      riscv_emit_move (temp, src);
+      riscv_emit_move (dest, temp);
+    }
+  /* Saves (stores) are frame-related; record the canonical store.  */
+  if (MEM_P (dest))
+    riscv_set_frame_expr (riscv_frame_set (dest, src));
+}
+
+/* Save register REG to MEM. Make the instruction frame-related. */
+
+static void
+riscv_save_reg (rtx reg, rtx mem)
+{
+ riscv_emit_save_slot_move (mem, reg, RISCV_PROLOGUE_TEMP (GET_MODE (reg)));
+}
+
+
+/* Expand the "prologue" pattern: allocate the stack frame, save the
+ call-saved registers, and set up the frame pointer if needed. */
+
+void
+riscv_expand_prologue (void)
+{
+ const struct riscv_frame_info *frame;
+ HOST_WIDE_INT size;
+ rtx insn;
+
+ frame = &cfun->machine->frame;
+ size = frame->total_size;
+
+ if (flag_stack_usage_info)
+ current_function_static_stack_size = size;
+
+ /* Save the registers. Allocate up to RISCV_MAX_FIRST_STACK_STEP
+ bytes beforehand; this is enough to cover the register save area
+ without going out of range. */
+ if ((frame->mask | frame->fmask) != 0)
+ {
+ HOST_WIDE_INT step1;
+
+ /* First SP adjustment is capped so the save-slot offsets stay
+ within the immediate range of the store instructions. */
+ step1 = MIN (size, RISCV_MAX_FIRST_STACK_STEP);
+ insn = gen_add3_insn (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-step1));
+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
+ size -= step1;
+ riscv_for_each_saved_gpr_and_fpr (size, riscv_save_reg);
+ }
+
+ /* Set up the frame pointer, if we're using one. */
+ if (frame_pointer_needed)
+ {
+ insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (frame->hard_frame_pointer_offset - size));
+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
+ }
+
+ /* Allocate the rest of the frame. */
+ if (size > 0)
+ {
+ if (SMALL_OPERAND (-size))
+ RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-size)))) = 1;
+ else
+ {
+ /* SIZE is too big for an addi immediate: materialize it in a
+ temporary and subtract. */
+ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (size));
+ emit_insn (gen_sub3_insn (stack_pointer_rtx,
+ stack_pointer_rtx,
+ RISCV_PROLOGUE_TEMP (Pmode)));
+
+ /* Describe the combined effect of the previous instructions. */
+ riscv_set_frame_expr
+ (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx, -size)));
+ }
+ }
+}
+
+/* Emit instructions to restore register REG from slot MEM. */
+
+static void
+riscv_restore_reg (rtx reg, rtx mem)
+{
+ riscv_emit_save_slot_move (reg, mem, RISCV_EPILOGUE_TEMP (GET_MODE (reg)));
+}
+
+/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
+ says which. */
+
+void
+riscv_expand_epilogue (bool sibcall_p)
+{
+ const struct riscv_frame_info *frame;
+ HOST_WIDE_INT step1, step2;
+
+ /* A zero-size frame with no saved registers needs no epilogue at all. */
+ if (!sibcall_p && riscv_can_use_return_insn ())
+ {
+ emit_jump_insn (gen_return ());
+ return;
+ }
+
+ /* Split the frame into two. STEP1 is the amount of stack we should
+ deallocate before restoring the registers. STEP2 is the amount we
+ should deallocate afterwards.
+
+ Start off by assuming that no registers need to be restored. */
+ frame = &cfun->machine->frame;
+ step1 = frame->total_size;
+ step2 = 0;
+
+ /* Move past any dynamic stack allocations: reset SP from the frame
+ pointer, since alloca has made the SP-relative layout unknown. */
+ if (cfun->calls_alloca)
+ {
+ rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
+ /* NOTE(review): this uses SMALL_INT while the step1 code below uses
+ SMALL_OPERAND -- confirm whether the two predicates differ. */
+ if (!SMALL_INT (adjust))
+ {
+ riscv_emit_move (RISCV_EPILOGUE_TEMP (Pmode), adjust);
+ adjust = RISCV_EPILOGUE_TEMP (Pmode);
+ }
+
+ emit_insn (gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx, adjust));
+ }
+
+ /* If we need to restore registers, deallocate as much stack as
+ possible in the second step without going out of range. */
+ if ((frame->mask | frame->fmask) != 0)
+ {
+ step2 = MIN (step1, RISCV_MAX_FIRST_STACK_STEP);
+ step1 -= step2;
+ }
+
+ /* Deallocate the first STEP1 bytes by adding them back to SP. */
+ if (step1 > 0)
+ {
+ /* Get an rtx for STEP1 that we can add to BASE. */
+ rtx adjust = GEN_INT (step1);
+ if (!SMALL_OPERAND (step1))
+ {
+ riscv_emit_move (RISCV_EPILOGUE_TEMP (Pmode), adjust);
+ adjust = RISCV_EPILOGUE_TEMP (Pmode);
+ }
+
+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
+ }
+
+ /* Restore the registers; their slots are now within STEP2 of SP. */
+ riscv_for_each_saved_gpr_and_fpr (frame->total_size - step2,
+ riscv_restore_reg);
+
+ /* Deallocate the final bit of the frame. */
+ if (step2 > 0)
+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (step2)));
+
+ /* Add in the __builtin_eh_return stack adjustment. */
+ if (crtl->calls_eh_return)
+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
+ EH_RETURN_STACKADJ_RTX));
+
+ /* For a normal epilogue, jump through the (already restored) return
+ address; a sibcall epilogue falls through to the call itself. */
+ if (!sibcall_p)
+ {
+ rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
+ emit_jump_insn (gen_simple_return_internal (ra));
+ }
+}
+
+/* Return nonzero if this function is known to have a null epilogue.
+ This allows the optimizer to omit jumps to jumps if no stack
+ was created. Only meaningful after reload, when the frame size
+ is final. */
+
+bool
+riscv_can_use_return_insn (void)
+{
+ return reload_completed && cfun->machine->frame.total_size == 0;
+}
+
+/* Return true if register REGNO can store a value of mode MODE.
+ The result of this function is cached in riscv_hard_regno_mode_ok. */
+
+static bool
+riscv_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
+{
+ unsigned int size = GET_MODE_SIZE (mode);
+ enum mode_class mclass = GET_MODE_CLASS (mode);
+
+ /* This is hella bogus but ira_build segfaults on RV32 without it. */
+ if (VECTOR_MODE_P (mode))
+ return true;
+
+ if (GP_REG_P (regno))
+ {
+ /* Any GPR can hold a value of up to one word. */
+ if (size <= UNITS_PER_WORD)
+ return true;
+
+ /* Double-word values must be even-register-aligned. */
+ if (size <= 2 * UNITS_PER_WORD)
+ return regno % 2 == 0;
+ }
+
+ if (FP_REG_P (regno))
+ {
+ /* FPRs hold only floating-point values, up to UNITS_PER_FPVALUE. */
+ if (mclass == MODE_FLOAT
+ || mclass == MODE_COMPLEX_FLOAT
+ || mclass == MODE_VECTOR_FLOAT)
+ return size <= UNITS_PER_FPVALUE;
+ }
+
+ return false;
+}
+
+/* Implement HARD_REGNO_NREGS: the number of consecutive hard registers
+ needed to hold a value of mode MODE starting at REGNO. */
+
+unsigned int
+riscv_hard_regno_nregs (int regno, enum machine_mode mode)
+{
+ if (FP_REG_P (regno))
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
+
+ /* All other registers are word-sized. */
+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+}
+
+/* Implement CLASS_MAX_NREGS, taking the maximum of the cases
+ in riscv_hard_regno_nregs. */
+
+int
+riscv_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
+{
+ int size;
+ HARD_REG_SET left;
+
+ /* 0x8000 is a sentinel larger than any real register size; it is
+ narrowed below by whichever register kinds RCLASS contains. */
+ size = 0x8000;
+ COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
+ if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
+ {
+ size = MIN (size, UNITS_PER_FPREG);
+ AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
+ }
+ /* Anything left over is a word-sized (integer) register. */
+ if (!hard_reg_set_empty_p (left))
+ size = MIN (size, UNITS_PER_WORD);
+ return (GET_MODE_SIZE (mode) + size - 1) / size;
+}
+
+/* Implement TARGET_PREFERRED_RELOAD_CLASS: narrow RCLASS to the
+ smallest of FP_REGS / GR_REGS that it fully contains. */
+
+static reg_class_t
+riscv_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
+{
+ return reg_class_subset_p (FP_REGS, rclass) ? FP_REGS :
+ reg_class_subset_p (GR_REGS, rclass) ? GR_REGS :
+ rclass;
+}
+
+/* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
+ Return a "canonical" class to represent it in later calculations. */
+
+static reg_class_t
+riscv_canonicalize_move_class (reg_class_t rclass)
+{
+ if (reg_class_subset_p (rclass, GENERAL_REGS))
+ rclass = GENERAL_REGS;
+
+ return rclass;
+}
+
+/* Implement TARGET_REGISTER_MOVE_COST. Return 0 for classes that are the
+ maximum of the move costs for subclasses; regclass will work out
+ the maximum for us. */
+
+static int
+riscv_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+ reg_class_t from, reg_class_t to)
+{
+ from = riscv_canonicalize_move_class (from);
+ to = riscv_canonicalize_move_class (to);
+
+ /* GPR<->GPR, GPR->FPR and FPR<->FPR moves are all single instructions. */
+ if ((from == GENERAL_REGS && to == GENERAL_REGS)
+ || (from == GENERAL_REGS && to == FP_REGS)
+ || (from == FP_REGS && to == FP_REGS))
+ return COSTS_N_INSNS (1);
+
+ /* FPR->GPR moves may be slower; the tuning tables decide. */
+ if (from == FP_REGS && to == GENERAL_REGS)
+ return tune_info->fp_to_int_cost;
+
+ return 0;
+}
+
+/* Implement TARGET_MEMORY_MOVE_COST. */
+
+static int
+riscv_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
+{
+ return (tune_info->memory_cost
+ + memory_move_secondary_cost (mode, rclass, in));
+}
+
+/* Return the register class required for a secondary register when
+ copying between one of the registers in RCLASS and value X, which
+ has mode MODE. X is the source of the move if IN_P, otherwise it
+ is the destination. Return NO_REGS if no secondary register is
+ needed. */
+
+enum reg_class
+riscv_secondary_reload_class (enum reg_class rclass,
+ enum machine_mode mode, rtx x,
+ bool in_p ATTRIBUTE_UNUSED)
+{
+ int regno;
+
+ regno = true_regnum (x);
+
+ if (reg_class_subset_p (rclass, FP_REGS))
+ {
+ if (MEM_P (x) && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
+ /* We can use flw/fld/fsw/fsd. */
+ return NO_REGS;
+
+ if (GP_REG_P (regno) || x == CONST0_RTX (mode))
+ /* We can use fmv or go through memory when mode > Pmode. */
+ return NO_REGS;
+
+ if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x))
+ /* We can force the constant to memory and use flw/fld. */
+ return NO_REGS;
+
+ if (FP_REG_P (regno))
+ /* We can use fmv.fmt. */
+ return NO_REGS;
+
+ /* Otherwise, we need to reload through an integer register. */
+ return GR_REGS;
+ }
+ /* FPR source with a non-GPR destination class also needs a GPR. */
+ if (FP_REG_P (regno))
+ return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
+
+ return NO_REGS;
+}
+
+/* Implement TARGET_MODE_REP_EXTENDED. */
+
+static int
+riscv_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
+{
+ /* On 64-bit targets, SImode register values are sign-extended to DImode. */
+ if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
+ return SIGN_EXTEND;
+
+ return UNKNOWN;
+}
+
+/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
+
+static bool
+riscv_scalar_mode_supported_p (enum machine_mode mode)
+{
+ /* Accept fixed-point modes up to double-word precision, then defer
+ to the default implementation for everything else. */
+ if (ALL_FIXED_POINT_MODE_P (mode)
+ && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
+ return true;
+
+ return default_scalar_mode_supported_p (mode);
+}
+
+/* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
+ dependencies have no cost. */
+
+static int
+riscv_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
+ rtx dep ATTRIBUTE_UNUSED, int cost)
+{
+ /* A nonzero REG_NOTE_KIND means an anti or output dependency. */
+ if (REG_NOTE_KIND (link) != 0)
+ return 0;
+ return cost;
+}
+
+/* Return the number of instructions that can be issued per cycle. */
+
+static int
+riscv_issue_rate (void)
+{
+ return tune_info->issue_rate;
+}
+
+/* This structure describes a single built-in function. */
+struct riscv_builtin_description {
+ /* The code of the main .md file instruction. See riscv_builtin_type
+ for more information. */
+ enum insn_code icode;
+
+ /* The name of the built-in function. */
+ const char *name;
+
+ /* Specifies how the function should be expanded. */
+ enum riscv_builtin_type builtin_type;
+
+ /* The function's prototype. */
+ enum riscv_function_type function_type;
+
+ /* Whether the function is available. */
+ unsigned int (*avail) (void);
+};
+
+/* Availability predicate for builtins that are always available. */
+static unsigned int
+riscv_builtin_avail_riscv (void)
+{
+ return 1;
+}
+
+/* Construct a riscv_builtin_description from the given arguments.
+
+ INSN is the name of the associated instruction pattern, without the
+ leading CODE_FOR_riscv_.
+
+ CODE is the floating-point condition code associated with the
+ function. It can be 'f' if the field is not applicable.
+
+ NAME is the name of the function itself, without the leading
+ "__builtin_riscv_".
+
+ BUILTIN_TYPE and FUNCTION_TYPE are riscv_builtin_description fields.
+
+ AVAIL is the name of the availability predicate, without the leading
+ riscv_builtin_avail_. */
+#define RISCV_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \
+ { CODE_FOR_ ## INSN, "__builtin_riscv_" NAME, \
+ BUILTIN_TYPE, FUNCTION_TYPE, riscv_builtin_avail_ ## AVAIL }
+
+/* Define __builtin_riscv_, which is a RISCV_BUILTIN_DIRECT function
+ mapped to instruction CODE_FOR_, FUNCTION_TYPE and AVAIL
+ are as for RISCV_BUILTIN. */
+#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
+ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
+
+/* Define __builtin_riscv_, which is a RISCV_BUILTIN_DIRECT_NO_TARGET
+ function mapped to instruction CODE_FOR_, FUNCTION_TYPE
+ and AVAIL are as for RISCV_BUILTIN. */
+#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
+ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT_NO_TARGET, \
+ FUNCTION_TYPE, AVAIL)
+
+/* The master table of builtins; indices here match riscv_builtin_decls. */
+static const struct riscv_builtin_description riscv_builtins[] = {
+ DIRECT_NO_TARGET_BUILTIN (nop, RISCV_VOID_FTYPE_VOID, riscv),
+};
+
+/* Index I is the function declaration for riscv_builtins[I], or null if the
+ function isn't defined on this target. */
+static GTY(()) tree riscv_builtin_decls[ARRAY_SIZE (riscv_builtins)];
+
+
+/* Source-level argument types. */
+#define RISCV_ATYPE_VOID void_type_node
+#define RISCV_ATYPE_INT integer_type_node
+#define RISCV_ATYPE_POINTER ptr_type_node
+#define RISCV_ATYPE_CPOINTER const_ptr_type_node
+
+/* Standard mode-based argument types. */
+#define RISCV_ATYPE_UQI unsigned_intQI_type_node
+#define RISCV_ATYPE_SI intSI_type_node
+#define RISCV_ATYPE_USI unsigned_intSI_type_node
+#define RISCV_ATYPE_DI intDI_type_node
+#define RISCV_ATYPE_UDI unsigned_intDI_type_node
+#define RISCV_ATYPE_SF float_type_node
+#define RISCV_ATYPE_DF double_type_node
+
+/* RISCV_FTYPE_ATYPESN takes N RISCV_FTYPES-like type codes and lists
+ their associated RISCV_ATYPEs. The first code is the return type. */
+#define RISCV_FTYPE_ATYPES1(A, B) \
+ RISCV_ATYPE_##A, RISCV_ATYPE_##B
+
+#define RISCV_FTYPE_ATYPES2(A, B, C) \
+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C
+
+#define RISCV_FTYPE_ATYPES3(A, B, C, D) \
+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D
+
+#define RISCV_FTYPE_ATYPES4(A, B, C, D, E) \
+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D, \
+ RISCV_ATYPE_##E
+
+/* Return the function type associated with function prototype TYPE.
+ Types are built lazily and cached in a static table. */
+
+static tree
+riscv_build_function_type (enum riscv_function_type type)
+{
+ static tree types[(int) RISCV_MAX_FTYPE_MAX];
+
+ if (types[(int) type] == NULL_TREE)
+ switch (type)
+ {
+#define DEF_RISCV_FTYPE(NUM, ARGS) \
+ case RISCV_FTYPE_NAME##NUM ARGS: \
+ types[(int) type] \
+ = build_function_type_list (RISCV_FTYPE_ATYPES##NUM ARGS, \
+ NULL_TREE); \
+ break;
+#include "config/riscv/riscv-ftypes.def"
+#undef DEF_RISCV_FTYPE
+ default:
+ gcc_unreachable ();
+ }
+
+ return types[(int) type];
+}
+
+/* Implement TARGET_INIT_BUILTINS. */
+
+static void
+riscv_init_builtins (void)
+{
+ const struct riscv_builtin_description *d;
+ unsigned int i;
+
+ /* Iterate through all of the bdesc arrays, initializing all of the
+ builtin functions. The table index doubles as the function code. */
+ for (i = 0; i < ARRAY_SIZE (riscv_builtins); i++)
+ {
+ d = &riscv_builtins[i];
+ if (d->avail ())
+ riscv_builtin_decls[i]
+ = add_builtin_function (d->name,
+ riscv_build_function_type (d->function_type),
+ i, BUILT_IN_MD, NULL, NULL);
+ }
+}
+
+/* Implement TARGET_BUILTIN_DECL. */
+
+static tree
+riscv_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+ if (code >= ARRAY_SIZE (riscv_builtins))
+ return error_mark_node;
+ return riscv_builtin_decls[code];
+}
+
+/* Take argument ARGNO from EXP's argument list and convert it into a
+ form suitable for input operand OPNO of instruction ICODE. Return the
+ value. */
+
+static rtx
+riscv_prepare_builtin_arg (enum insn_code icode,
+ unsigned int opno, tree exp, unsigned int argno)
+{
+ tree arg;
+ rtx value;
+ enum machine_mode mode;
+
+ arg = CALL_EXPR_ARG (exp, argno);
+ value = expand_normal (arg);
+ mode = insn_data[icode].operand[opno].mode;
+ if (!insn_data[icode].operand[opno].predicate (value, mode))
+ {
+ /* We need to get the mode from ARG for two reasons:
+
+ - to cope with address operands, where MODE is the mode of the
+ memory, rather than of VALUE itself.
+
+ - to cope with special predicates like pmode_register_operand,
+ where MODE is VOIDmode. */
+ value = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (arg)), value);
+
+ /* Check the predicate again. */
+ if (!insn_data[icode].operand[opno].predicate (value, mode))
+ {
+ error ("invalid argument to built-in function");
+ return const0_rtx;
+ }
+ }
+
+ return value;
+}
+
+/* Return an rtx suitable for output operand OP of instruction ICODE.
+ If TARGET is non-null, try to use it where possible. */
+
+static rtx
+riscv_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
+{
+ enum machine_mode mode;
+
+ mode = insn_data[icode].operand[op].mode;
+ if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
+ target = gen_reg_rtx (mode);
+
+ return target;
+}
+
+/* Expand a RISCV_BUILTIN_DIRECT or RISCV_BUILTIN_DIRECT_NO_TARGET function;
+ HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
+ and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
+ suggests a good place to put the result. */
+
+static rtx
+riscv_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
+ bool has_target_p)
+{
+ rtx ops[MAX_RECOG_OPERANDS];
+ int opno, argno;
+
+ /* Map any target to operand 0. */
+ opno = 0;
+ if (has_target_p)
+ {
+ target = riscv_prepare_builtin_target (icode, opno, target);
+ ops[opno] = target;
+ opno++;
+ }
+
+ /* Map the arguments to the other operands. The n_operands value
+ for an expander includes match_dups and match_scratches as well as
+ match_operands, so n_operands is only an upper bound on the number
+ of arguments to the expander function. */
+ gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
+ for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
+ ops[opno] = riscv_prepare_builtin_arg (icode, opno, exp, argno);
+
+ /* GEN_FCN takes a fixed argument count, so dispatch on OPNO.
+ NOTE(review): OPNO < 2 (e.g. a no-operand pattern like nop with no
+ target) would hit gcc_unreachable -- confirm every table entry has
+ at least two operands or a target. */
+ switch (opno)
+ {
+ case 2:
+ emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
+ break;
+
+ case 3:
+ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
+ break;
+
+ case 4:
+ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ return target;
+}
+
+/* Implement TARGET_EXPAND_BUILTIN. Look up the builtin by its
+ function code and dispatch on its expansion type. */
+
+static rtx
+riscv_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ tree fndecl;
+ unsigned int fcode, avail;
+ const struct riscv_builtin_description *d;
+
+ fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ fcode = DECL_FUNCTION_CODE (fndecl);
+ gcc_assert (fcode < ARRAY_SIZE (riscv_builtins));
+ d = &riscv_builtins[fcode];
+ /* An unavailable builtin could not have been declared, so a call to
+ one here indicates an internal inconsistency. */
+ avail = d->avail ();
+ gcc_assert (avail != 0);
+ switch (d->builtin_type)
+ {
+ case RISCV_BUILTIN_DIRECT:
+ return riscv_expand_builtin_direct (d->icode, target, exp, true);
+
+ case RISCV_BUILTIN_DIRECT_NO_TARGET:
+ return riscv_expand_builtin_direct (d->icode, target, exp, false);
+ }
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
+ in order to avoid duplicating too much logic from elsewhere. */
+
+static void
+riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
+ tree function)
+{
+ rtx this_rtx, temp1, temp2, insn, fnaddr;
+ bool use_sibcall_p;
+
+ /* Pretend to be a post-reload pass while generating rtl. */
+ reload_completed = 1;
+
+ /* Mark the end of the (empty) prologue. */
+ emit_note (NOTE_INSN_PROLOGUE_END);
+
+ /* Determine if we can use a sibcall to call FUNCTION directly. */
+ fnaddr = XEXP (DECL_RTL (function), 0);
+ use_sibcall_p = absolute_symbolic_operand (fnaddr, Pmode);
+
+ /* We need two temporary registers in some cases. */
+ temp1 = gen_rtx_REG (Pmode, GP_TEMP_FIRST);
+ temp2 = gen_rtx_REG (Pmode, GP_TEMP_FIRST + 1);
+
+ /* Find out which register contains the "this" pointer. A hidden
+ aggregate-return pointer, if any, occupies the first argument slot. */
+ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
+ else
+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
+
+ /* Add DELTA to THIS_RTX. */
+ if (delta != 0)
+ {
+ rtx offset = GEN_INT (delta);
+ if (!SMALL_OPERAND (delta))
+ {
+ riscv_emit_move (temp1, offset);
+ offset = temp1;
+ }
+ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
+ }
+
+ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
+ if (vcall_offset != 0)
+ {
+ rtx addr;
+
+ /* Set TEMP1 to *THIS_RTX. */
+ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
+
+ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
+ addr = riscv_add_offset (temp2, temp1, vcall_offset);
+
+ /* Load the offset and add it to THIS_RTX. */
+ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
+ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
+ }
+
+ /* Jump to the target function. Use a sibcall if direct jumps are
+ allowed, otherwise load the address into a register first. */
+ if (use_sibcall_p)
+ {
+ insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
+ SIBLING_CALL_P (insn) = 1;
+ }
+ else
+ {
+ riscv_emit_move(temp1, fnaddr);
+ emit_jump_insn (gen_indirect_jump (temp1));
+ }
+
+ /* Run just enough of rest_of_compilation. This sequence was
+ "borrowed" from alpha.c. */
+ insn = get_insns ();
+ split_all_insns_noflow ();
+ shorten_branches (insn);
+ final_start_function (insn, file, 1);
+ final (insn, file, 1);
+ final_end_function ();
+
+ /* Clean up the vars set above. Note that final_end_function resets
+ the global pointer for us. */
+ reload_completed = 0;
+}
+
+/* Allocate a chunk of memory for per-function machine-dependent data. */
+
+static struct machine_function *
+riscv_init_machine_status (void)
+{
+ return ggc_alloc_cleared_machine_function ();
+}
+
+/* Implement TARGET_OPTION_OVERRIDE: validate and finalize all
+ command-line option state for this target. */
+
+static void
+riscv_option_override (void)
+{
+ int regno, mode;
+ const struct riscv_cpu_info *cpu;
+
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
+ SUBTARGET_OVERRIDE_OPTIONS;
+#endif
+
+ flag_pcc_struct_return = 0;
+
+ /* PIC code cannot use gp-relative small data. */
+ if (flag_pic)
+ g_switch_value = 0;
+
+ /* Prefer a call to memcpy over inline code when optimizing for size,
+ though see MOVE_RATIO in riscv.h. */
+ if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
+ target_flags |= MASK_MEMCPY;
+
+ /* Handle -mtune. */
+ cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
+ RISCV_TUNE_STRING_DEFAULT);
+ tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
+
+ /* If the user hasn't specified a branch cost, use the processor's
+ default. */
+ if (riscv_branch_cost == 0)
+ riscv_branch_cost = tune_info->branch_cost;
+
+ /* Set up riscv_hard_regno_mode_ok: precompute the (mode, regno)
+ validity table once rather than querying the predicate repeatedly. */
+ for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ riscv_hard_regno_mode_ok[mode][regno]
+ = riscv_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
+
+ /* Function to allocate machine-dependent function status. */
+ init_machine_status = &riscv_init_machine_status;
+
+ if (riscv_cmodel_string)
+ {
+ if (strcmp (riscv_cmodel_string, "medlow") == 0)
+ riscv_cmodel = CM_MEDLOW;
+ else if (strcmp (riscv_cmodel_string, "medany") == 0)
+ riscv_cmodel = CM_MEDANY;
+ else
+ error ("unsupported code model: %s", riscv_cmodel_string);
+ }
+
+ /* -fpic overrides any explicit code model. */
+ if (flag_pic)
+ riscv_cmodel = CM_PIC;
+
+ riscv_init_relocs ();
+}
+
+/* Implement TARGET_CONDITIONAL_REGISTER_USAGE: without hardware
+ floating point, the FPRs are unusable. */
+
+static void
+riscv_conditional_register_usage (void)
+{
+ int regno;
+
+ if (!TARGET_HARD_FLOAT)
+ {
+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
+ fixed_regs[regno] = call_used_regs[regno] = 1;
+ }
+}
+
+/* Implement TARGET_TRAMPOLINE_INIT: write the trampoline code and its
+ two data words (static chain, target address) into M_TRAMP. */
+
+static void
+riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+{
+ rtx addr, end_addr, mem;
+ rtx trampoline[4];
+ unsigned int i;
+ HOST_WIDE_INT static_chain_offset, target_function_offset;
+
+ /* Work out the offsets of the pointers from the start of the
+ trampoline code. Layout: 4 code words, then the static chain
+ pointer, then the target function pointer. */
+ gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
+ static_chain_offset = TRAMPOLINE_CODE_SIZE;
+ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
+
+ /* Get pointers to the beginning and end of the code block. */
+ addr = force_reg (Pmode, XEXP (m_tramp, 0));
+ end_addr = riscv_force_binary (Pmode, PLUS, addr, GEN_INT (TRAMPOLINE_CODE_SIZE));
+
+#define OP(X) gen_int_mode (X, SImode)
+#define MATCH_LREG ((Pmode) == DImode ? MATCH_LD : MATCH_LW)
+
+ /* auipc t0, 0
+ l[wd] t1, target_function_offset(t0)
+ l[wd] $static_chain, static_chain_offset(t0)
+ jr t1
+ */
+
+ /* Encode the four instructions as raw 32-bit words. */
+ trampoline[0] = OP (RISCV_UTYPE (AUIPC, STATIC_CHAIN_REGNUM, 0));
+ trampoline[1] = OP (RISCV_ITYPE (LREG, RISCV_PROLOGUE_TEMP_REGNUM,
+ STATIC_CHAIN_REGNUM, target_function_offset));
+ trampoline[2] = OP (RISCV_ITYPE (LREG, STATIC_CHAIN_REGNUM,
+ STATIC_CHAIN_REGNUM, static_chain_offset));
+ trampoline[3] = OP (RISCV_ITYPE (JALR, 0, RISCV_PROLOGUE_TEMP_REGNUM, 0));
+
+#undef MATCH_LREG
+#undef OP
+
+ /* Copy the trampoline code. Leave any padding uninitialized. */
+ for (i = 0; i < ARRAY_SIZE (trampoline); i++)
+ {
+ mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
+ riscv_emit_move (mem, trampoline[i]);
+ }
+
+ /* Set up the static chain pointer field. */
+ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
+ riscv_emit_move (mem, chain_value);
+
+ /* Set up the target function field. */
+ mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
+ riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
+
+ /* Flush the code part of the trampoline so the stores above become
+ visible to instruction fetch. */
+ emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
+ emit_insn (gen_clear_cache (addr, end_addr));
+}
+
+/* Implement TARGET_LRA_P: LRA is enabled by the -mlra flag. */
+static bool
+riscv_lra_p (void)
+{
+ return riscv_lra_flag;
+}
+
+/* Initialize the GCC target structure. Each #undef/#define pair
+ installs one riscv hook into TARGET_INITIALIZER below. */
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE riscv_option_override
+
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
+
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST riscv_adjust_cost
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
+
+/* Any function may be a sibcall target. */
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL hook_bool_tree_tree_true
+
+#undef TARGET_REGISTER_MOVE_COST
+#define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
+#undef TARGET_MEMORY_MOVE_COST
+#define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS riscv_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST riscv_address_cost
+
+#undef TARGET_PREFERRED_RELOAD_CLASS
+#define TARGET_PREFERRED_RELOAD_CLASS riscv_preferred_reload_class
+
+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+
+#undef TARGET_EXPAND_BUILTIN_VA_START
+#define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
+
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
+
+#undef TARGET_PRINT_OPERAND
+#define TARGET_PRINT_OPERAND riscv_print_operand
+#undef TARGET_PRINT_OPERAND_ADDRESS
+#define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG riscv_function_arg
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
+#undef TARGET_FUNCTION_ARG_BOUNDARY
+#define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
+
+#undef TARGET_MODE_REP_EXTENDED
+#define TARGET_MODE_REP_EXTENDED riscv_mode_rep_extended
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P riscv_scalar_mode_supported_p
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS riscv_init_builtins
+#undef TARGET_BUILTIN_DECL
+#define TARGET_BUILTIN_DECL riscv_builtin_decl
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN riscv_expand_builtin
+
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS HAVE_AS_TLS
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
+
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
+
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
+
+#ifdef HAVE_AS_DTPRELWORD
+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
+#define TARGET_ASM_OUTPUT_DWARF_DTPREL riscv_output_dwarf_dtprel
+#endif
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE riscv_can_eliminate
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
+
+#undef TARGET_IN_SMALL_DATA_P
+#define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
+
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section
+
+/* Section anchors may reach anything within the 12-bit immediate range. */
+#undef TARGET_MIN_ANCHOR_OFFSET
+#define TARGET_MIN_ANCHOR_OFFSET (-RISCV_IMM_REACH/2)
+
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET (RISCV_IMM_REACH/2-1)
+
+#undef TARGET_LRA_P
+#define TARGET_LRA_P riscv_lra_p
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-riscv.h"
diff -rNU3 dist.orig/gcc/config/riscv/riscv.h dist/gcc/config/riscv/riscv.h
--- dist.orig/gcc/config/riscv/riscv.h 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/riscv.h 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,1132 @@
+/* Definition of RISC-V target for GNU compiler.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+ Based on MIPS target for GNU compiler.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>.  */
+
+/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is
+ directly accessible, while the command-line options select
+ TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI
+ in use. */
+#define TARGET_HARD_FLOAT TARGET_HARD_FLOAT_ABI
+#define TARGET_SOFT_FLOAT TARGET_SOFT_FLOAT_ABI
+
+/* Target CPU builtins. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_assert ("machine=riscv"); \
+ \
+ builtin_assert ("cpu=riscv"); \
+ builtin_define ("__riscv__"); \
+ builtin_define ("__riscv"); \
+ builtin_define ("_riscv"); \
+ \
+ if (TARGET_64BIT) \
+ { \
+ builtin_define ("__riscv64"); \
+ builtin_define ("_RISCV_SIM=_ABI64"); \
+ } \
+ else \
+ builtin_define ("_RISCV_SIM=_ABI32"); \
+ \
+ builtin_define ("_ABI32=1"); \
+ builtin_define ("_ABI64=3"); \
+ \
+ \
+ builtin_define_with_int_value ("_RISCV_SZINT", INT_TYPE_SIZE); \
+ builtin_define_with_int_value ("_RISCV_SZLONG", LONG_TYPE_SIZE); \
+ builtin_define_with_int_value ("_RISCV_SZPTR", POINTER_SIZE); \
+ builtin_define_with_int_value ("_RISCV_FPSET", 32); \
+ \
+ if (TARGET_ATOMIC) { \
+ builtin_define ("__riscv_atomic"); \
+ } \
+ \
+ /* These defines reflect the ABI in use, not whether the \
+ FPU is directly accessible. */ \
+ if (TARGET_HARD_FLOAT_ABI) { \
+ builtin_define ("__riscv_hard_float"); \
+ if (TARGET_FDIV) { \
+ builtin_define ("__riscv_fdiv"); \
+ builtin_define ("__riscv_fsqrt"); \
+ } \
+ } else \
+ builtin_define ("__riscv_soft_float"); \
+ \
+ /* The base RISC-V ISA is always little-endian. */ \
+ builtin_define_std ("RISCVEL"); \
+ builtin_define ("_RISCVEL"); \
+ \
+ /* Macros dependent on the C dialect. */ \
+ if (preprocessing_asm_p ()) \
+ { \
+ builtin_define_std ("LANGUAGE_ASSEMBLY"); \
+ builtin_define ("_LANGUAGE_ASSEMBLY"); \
+ } \
+ else if (c_dialect_cxx ()) \
+ { \
+ builtin_define ("_LANGUAGE_C_PLUS_PLUS"); \
+ builtin_define ("__LANGUAGE_C_PLUS_PLUS"); \
+ builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); \
+ } \
+ else \
+ { \
+ builtin_define_std ("LANGUAGE_C"); \
+ builtin_define ("_LANGUAGE_C"); \
+ } \
+ if (c_dialect_objc ()) \
+ { \
+ builtin_define ("_LANGUAGE_OBJECTIVE_C"); \
+ builtin_define ("__LANGUAGE_OBJECTIVE_C"); \
+ /* Bizarre, but needed at least for Irix. */ \
+ builtin_define_std ("LANGUAGE_C"); \
+ builtin_define ("_LANGUAGE_C"); \
+ } \
+ if (riscv_cmodel == CM_MEDANY) \
+ builtin_define ("_RISCV_CMODEL_MEDANY"); \
+ } \
+ while (0)
+
+/* Default target_flags if no switches are specified */
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+#ifndef RISCV_ARCH_STRING_DEFAULT
+#define RISCV_ARCH_STRING_DEFAULT "IMAFD"
+#endif
+
+#ifndef RISCV_TUNE_STRING_DEFAULT
+#define RISCV_TUNE_STRING_DEFAULT "rocket"
+#endif
+
+#ifndef TARGET_64BIT_DEFAULT
+#define TARGET_64BIT_DEFAULT 1
+#endif
+
+#if TARGET_64BIT_DEFAULT
+# define MULTILIB_ARCH_DEFAULT "m64"
+# define OPT_ARCH64 "!m32"
+# define OPT_ARCH32 "m32"
+#else
+# define MULTILIB_ARCH_DEFAULT "m32"
+# define OPT_ARCH64 "m64"
+# define OPT_ARCH32 "!m64"
+#endif
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS \
+ { MULTILIB_ARCH_DEFAULT }
+#endif
+
+
+/* Support for a compile-time default CPU, et cetera. The rules are:
+ --with-arch is ignored if -march is specified.
+ --with-tune is ignored if -mtune is specified.
+ --with-float is ignored if -mhard-float or -msoft-float are specified. */
+#define OPTION_DEFAULT_SPECS \
+ {"arch_32", "%{" OPT_ARCH32 ":%{m32}}" }, \
+ {"arch_64", "%{" OPT_ARCH64 ":%{m64}}" }, \
+ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
+ {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \
+
+#define DRIVER_SELF_SPECS ""
+
+#ifdef IN_LIBGCC2
+#undef TARGET_64BIT
+/* Make this compile time constant for libgcc2 */
+#ifdef __riscv64
+#define TARGET_64BIT 1
+#else
+#define TARGET_64BIT 0
+#endif
+#endif /* IN_LIBGCC2 */
+
+/* Tell collect what flags to pass to nm. */
+#ifndef NM_FLAGS
+#define NM_FLAGS "-Bn"
+#endif
+
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%(subtarget_asm_debugging_spec) \
+%{m32} %{m64} %{!m32:%{!m64: %(asm_abi_default_spec)}} \
+%{fPIC|fpic|fPIE|fpie:-fpic} \
+%{march=*} \
+%(subtarget_asm_spec)"
+
+/* Extra switches sometimes passed to the linker. */
+
+#ifndef LINK_SPEC
+#define LINK_SPEC "\
+%{!T:-dT riscv.ld} \
+%{m64:-melf64lriscv} \
+%{m32:-melf32lriscv} \
+%{shared}"
+#endif /* LINK_SPEC defined */
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ specification name, and a string constant that used by the GCC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#define EXTRA_SPECS \
+ { "asm_abi_default_spec", "-" MULTILIB_ARCH_DEFAULT }, \
+ SUBTARGET_EXTRA_SPECS
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
+#endif
+
+#define TARGET_DEFAULT_CMODEL CM_MEDLOW
+
+/* By default, turn on GDB extensions. */
+#define DEFAULT_GDB_EXTENSIONS 1
+
+#define LOCAL_LABEL_PREFIX "."
+#define USER_LABEL_PREFIX ""
+
+#define DWARF2_DEBUGGING_INFO 1
+#define DWARF2_ASM_LINE_DEBUG_INFO 0
+
+/* The mapping from gcc register number to DWARF 2 CFA column number. */
+#define DWARF_FRAME_REGNUM(REGNO) \
+ (GP_REG_P (REGNO) || FP_REG_P (REGNO) ? REGNO : INVALID_REGNUM)
+
+/* The DWARF 2 CFA column which tracks the return address. */
+#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
+
+/* Don't emit .cfi_sections, as it does not work */
+#undef HAVE_GAS_CFI_SECTIONS_DIRECTIVE
+#define HAVE_GAS_CFI_SECTIONS_DIRECTIVE 0
+
+/* Before the prologue, RA lives in r31. */
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
+
+/* Describe how we implement __builtin_eh_return. */
+#define EH_RETURN_DATA_REGNO(N) \
+ ((N) < 4 ? (N) + GP_ARG_FIRST : INVALID_REGNUM)
+
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4)
+
+/* Target machine storage layout */
+
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN 0
+#define WORDS_BIG_ENDIAN 0
+
+#define MAX_BITS_PER_WORD 64
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
+#ifndef IN_LIBGCC2
+#define MIN_UNITS_PER_WORD 4
+#endif
+
+/* We currently require both or neither of the `F' and `D' extensions. */
+#define UNITS_PER_FPREG 8
+
+/* If FP regs aren't wide enough for a given FP argument, it is passed in
+ integer registers. */
+#define MIN_FPRS_PER_FMT 1
+
+/* The largest size of value that can be held in floating-point
+ registers and moved with a single instruction. */
+#define UNITS_PER_HWFPVALUE \
+ (TARGET_SOFT_FLOAT_ABI ? 0 : UNITS_PER_FPREG)
+
+/* The largest size of value that can be held in floating-point
+ registers. */
+#define UNITS_PER_FPVALUE \
+ (TARGET_SOFT_FLOAT_ABI ? 0 \
+ : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)
+
+/* The number of bytes in a double. */
+#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT)
+
+/* Set the sizes of the core types. */
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32)
+#define LONG_LONG_TYPE_SIZE 64
+
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+/* XXX The ABI says long doubles are IEEE-754-2008 float128s. */
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+#ifdef IN_LIBGCC2
+# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
+#endif
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY BITS_PER_WORD
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* There is no point aligning anything to a rounder boundary than this. */
+#define BIGGEST_ALIGNMENT 128
+
+/* All accesses must be aligned. */
+#define STRICT_ALIGNMENT 1
+
+/* Define this if you wish to imitate the way many other C compilers
+ handle alignment of bitfields and the structures that contain
+ them.
+
+ The behavior is that the type written for a bit-field (`int',
+ `short', or other integer type) imposes an alignment for the
+ entire structure, as if the structure really did contain an
+ ordinary field of that type. In addition, the bit-field is placed
+ within the structure so that it would fit within such a field,
+ not crossing a boundary for it.
+
+ Thus, on most machines, a bit-field whose type is written as `int'
+ would not cross a four-byte boundary, and would force four-byte
+ alignment for the whole structure. (The alignment used may not
+ be four bytes; it is controlled by the other alignment
+ parameters.)
+
+ If the macro is defined, its definition should be a C expression;
+ a nonzero value for the expression enables this behavior. */
+
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* If defined, a C expression to compute the alignment given to a
+ constant that is being placed in memory. CONSTANT is the constant
+ and ALIGN is the alignment that the object would ordinarily have.
+ The value of this macro is used instead of that alignment to align
+ the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ The typical use of this macro is to increase alignment for string
+ constants to be word aligned so that `strcpy' calls that copy
+ constants can be done inline. */
+
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST || TREE_CODE (EXP) == CONSTRUCTOR) \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* If defined, a C expression to compute the alignment for a static
+ variable. TYPE is the data type, and ALIGN is the alignment that
+ the object would ordinarily have. The value of this macro is used
+ instead of that alignment to align the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ One use of this macro is to increase alignment of medium-size
+ data to make it all fit in fewer cache lines. Another is to
+ cause character arrays to be word-aligned so that `strcpy' calls
+ that copy constants to character arrays can be done inline. */
+
+#undef DATA_ALIGNMENT
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ ((((ALIGN) < BITS_PER_WORD) \
+ && (TREE_CODE (TYPE) == ARRAY_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE \
+ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
+
+/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
+ character arrays to be word-aligned so that `strcpy' calls that copy
+ constants to character arrays can be done inline, and 'strcmp' can be
+ optimised to use word loads. */
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
+ DATA_ALIGNMENT (TYPE, ALIGN)
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* When in 64-bit mode, move insns will sign extend SImode and CCmode
+ moves. All other references are zero extended. */
+#define LOAD_EXTEND_OP(MODE) \
+ (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \
+ ? SIGN_EXTEND : ZERO_EXTEND)
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (MODE) = Pmode; \
+ }
+
+/* Pmode is always the same as ptr_mode, but not always the same as word_mode.
+ Extensions of pointers to word_mode must be signed. */
+#define POINTERS_EXTEND_UNSIGNED false
+
+/* RV32 double-precision FP <-> integer moves go through memory */
+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
+ (!TARGET_64BIT && GET_MODE_SIZE (MODE) == 8 && \
+ (((CLASS1) == FP_REGS && (CLASS2) != FP_REGS) \
+ || ((CLASS2) == FP_REGS && (CLASS1) != FP_REGS)))
+
+/* Define if loading short immediate values into registers sign extends. */
+#define SHORT_IMMEDIATES_SIGN_EXTEND
+
+/* Standard register usage. */
+
+/* Number of hardware registers. We have:
+
+ - 32 integer registers
+ - 32 floating point registers
+ - 32 vector integer registers
+ - 32 vector floating point registers
+ - 2 fake registers:
+ - ARG_POINTER_REGNUM
+ - FRAME_POINTER_REGNUM */
+
+#define FIRST_PSEUDO_REGISTER 66
+
+/* x0, sp, gp, and tp are fixed. */
+
+#define FIXED_REGISTERS \
+{ /* General registers. */ \
+ 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* Floating-point registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* Others. */ \
+ 1, 1 \
+}
+
+
+/* a0-a7, t0-t6, fa0-fa7, and ft0-ft11 are volatile across calls.
+ The call RTLs themselves clobber ra. */
+
+#define CALL_USED_REGISTERS \
+{ /* General registers. */ \
+ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
+ /* Floating-point registers. */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
+ /* Others. */ \
+ 1, 1 \
+}
+
+#define CALL_REALLY_USED_REGISTERS \
+{ /* General registers. */ \
+ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
+ /* Floating-point registers. */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
+ /* Others. */ \
+ 1, 1 \
+}
+
+/* Internal macros to classify an ISA register's type. */
+
+#define GP_REG_FIRST 0
+#define GP_REG_LAST 31
+#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1)
+
+#define FP_REG_FIRST 32
+#define FP_REG_LAST 63
+#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1)
+
+/* The DWARF 2 CFA column which tracks the return address from a
+ signal handler context. This means that to maintain backwards
+ compatibility, no hard register can be assigned this column if it
+ would need to be handled by the DWARF unwinder. */
+#define DWARF_ALT_FRAME_RETURN_COLUMN 64
+
+#define GP_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
+#define FP_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
+
+#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
+
+/* Return coprocessor number from register number. */
+
+#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) \
+ (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2' \
+ : COP3_REG_P (REGNO) ? '3' : '?')
+
+
+#define HARD_REGNO_NREGS(REGNO, MODE) riscv_hard_regno_nregs (REGNO, MODE)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ riscv_hard_regno_mode_ok[ (int)(MODE) ][ (REGNO) ]
+
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ((MODE1) == (MODE2) || (GET_MODE_CLASS (MODE1) == MODE_INT \
+ && GET_MODE_CLASS (MODE2) == MODE_INT))
+
+/* Use s0 as the frame pointer if it is so requested. */
+#define HARD_FRAME_POINTER_REGNUM 8
+#define STACK_POINTER_REGNUM 2
+#define THREAD_POINTER_REGNUM 4
+
+/* These two registers don't really exist: they get eliminated to either
+ the stack or hard frame pointer. */
+#define ARG_POINTER_REGNUM 64
+#define FRAME_POINTER_REGNUM 65
+
+#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0
+#define HARD_FRAME_POINTER_IS_ARG_POINTER 0
+
+/* Register in which static-chain is passed to a function. */
+#define STATIC_CHAIN_REGNUM GP_TEMP_FIRST
+
+/* Registers used as temporaries in prologue/epilogue code.
+
+ The prologue registers mustn't conflict with any
+ incoming arguments, the static chain pointer, or the frame pointer.
+ The epilogue temporary mustn't conflict with the return registers,
+ the frame pointer, the EH stack adjustment, or the EH data registers. */
+
+#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1)
+#define RISCV_EPILOGUE_TEMP_REGNUM RISCV_PROLOGUE_TEMP_REGNUM
+
+#define RISCV_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP_REGNUM)
+#define RISCV_EPILOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_EPILOGUE_TEMP_REGNUM)
+
+#define FUNCTION_PROFILER(STREAM, LABELNO) \
+{ \
+ sorry ("profiler support for RISC-V"); \
+}
+
+/* Define this macro if it is as good or better to call a constant
+ function address than to call an address kept in a register. */
+#define NO_FUNCTION_CSE 1
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+enum reg_class
+{
+ NO_REGS, /* no registers in set */
+ T_REGS, /* registers used by indirect sibcalls */
+ GR_REGS, /* integer registers */
+ FP_REGS, /* floating point registers */
+ FRAME_REGS, /* $arg and $frame */
+ ALL_REGS, /* all registers */
+ LIM_REG_CLASSES /* max value + 1 */
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define GENERAL_REGS GR_REGS
+
+/* An initializer containing the names of the register classes as C
+ string constants. These names are used in writing some of the
+ debugging dumps. */
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "T_REGS", \
+ "GR_REGS", \
+ "FP_REGS", \
+ "FRAME_REGS", \
+ "ALL_REGS" \
+}
+
+/* An initializer containing the contents of the register classes,
+ as integers which are bit masks. The Nth integer specifies the
+ contents of class N. The way the integer MASK is interpreted is
+ that register R is in the class if `MASK & (1 << R)' is 1.
+
+ When the machine has more than 32 registers, an integer does not
+ suffice. Then the integers are replaced by sub-initializers,
+ braced groupings containing several integers. Each
+ sub-initializer must be suitable as an initializer for the type
+ `HARD_REG_SET' which is defined in `hard-reg-set.h'. */
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0xf00000e0, 0x00000000, 0x00000000 }, /* T_REGS */ \
+ { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \
+ { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000003 }, /* FRAME_REGS */ \
+ { 0xffffffff, 0xffffffff, 0x00000003 } /* ALL_REGS */ \
+}
+
+/* A C expression whose value is a register class containing hard
+ register REGNO. In general there is more that one such class;
+ choose a class which is "minimal", meaning that no smaller class
+ also contains the register. */
+
+#define REGNO_REG_CLASS(REGNO) riscv_regno_to_class[ (REGNO) ]
+
+/* A macro whose definition is the name of the class to which a
+ valid base register must belong. A base register is one used in
+ an address which is the register value plus a displacement. */
+
+#define BASE_REG_CLASS GR_REGS
+
+/* A macro whose definition is the name of the class to which a
+ valid index register must belong. An index register is one used
+ in an address where its value is either multiplied by a scale
+ factor or added to another register (as well as added to a
+ displacement). */
+
+#define INDEX_REG_CLASS NO_REGS
+
+/* We generally want to put call-clobbered registers ahead of
+ call-saved ones. (IRA expects this.) */
+
+#define REG_ALLOC_ORDER \
+{ \
+ /* Call-clobbered GPRs. */ \
+ 15, 14, 13, 12, 11, 10, 16, 17, 5, 6, 7, 28, 29, 30, 31, 1, \
+ /* Call-saved GPRs. */ \
+ 8, 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, \
+ /* GPRs that can never be exposed to the register allocator. */ \
+ 0, 2, 3, 4, \
+ /* Call-clobbered FPRs. */ \
+ 32, 33, 34, 35, 36, 37, 38, 39, 42, 43, 44, 45, 46, 47, 48, 49, \
+ 60, 61, 62, 63, \
+ /* Call-saved FPRs. */ \
+ 40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, \
+ /* None of the remaining classes have defined call-saved \
+ registers. */ \
+ 64, 65 \
+}
+
+/* True if VALUE fits in a signed RISC-V immediate, i.e. is within RISCV_IMM_REACH. */
+
+#include "opcode-riscv.h"
+#define SMALL_OPERAND(VALUE) \
+ ((unsigned HOST_WIDE_INT) (VALUE) + RISCV_IMM_REACH/2 < RISCV_IMM_REACH)
+
+/* True if VALUE can be loaded into a register using LUI. */
+
+#define LUI_OPERAND(VALUE) \
+ (((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) == ((1UL<<31) - RISCV_IMM_REACH) \
+ || ((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) + RISCV_IMM_REACH == 0)
+
+/* True if the CONST_INT rtx X satisfies SMALL_OPERAND / LUI_OPERAND
+   on its value. */
+
+#define SMALL_INT(X) SMALL_OPERAND (INTVAL (X))
+#define LUI_INT(X) LUI_OPERAND (INTVAL (X))
+
+/* The HI and LO registers can only be reloaded via the general
+ registers. Condition code registers can only be loaded to the
+ general registers, and from the floating point registers. */
+
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ riscv_secondary_reload_class (CLASS, MODE, X, true)
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ riscv_secondary_reload_class (CLASS, MODE, X, false)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+
+#define CLASS_MAX_NREGS(CLASS, MODE) riscv_class_max_nregs (CLASS, MODE)
+
+/* It is undefined to interpret an FP register in a different format than
+ that which it was created to be. */
+
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+ reg_classes_intersect_p (FP_REGS, CLASS)
+
+/* Stack layout; function entry, exit and calling. */
+
+#define STACK_GROWS_DOWNWARD
+
+#define FRAME_GROWS_DOWNWARD 1
+
+#define STARTING_FRAME_OFFSET 0
+
+#define RETURN_ADDR_RTX riscv_return_addr
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ (OFFSET) = riscv_initial_elimination_offset (FROM, TO)
+
+/* Allocate stack space for arguments at the beginning of each function. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* The argument pointer always points to the first argument. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+#define REG_PARM_STACK_SPACE(FNDECL) 0
+
+/* Define this if it is the responsibility of the caller to
+ allocate the area reserved for arguments passed in registers.
+ If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
+ of this macro is to determine whether the space is included in
+ `crtl->outgoing_args_size'. */
+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
+
+#define STACK_BOUNDARY 128
+
+/* Symbolic macros for the registers used to return integer and floating
+ point values. */
+
+#define GP_RETURN GP_ARG_FIRST
+#define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : FP_ARG_FIRST)
+
+#define MAX_ARGS_IN_REGISTERS 8
+
+/* Symbolic macros for the first/last argument registers. */
+
+#define GP_ARG_FIRST (GP_REG_FIRST + 10)
+#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
+#define GP_TEMP_FIRST (GP_REG_FIRST + 5)
+#define FP_ARG_FIRST (FP_REG_FIRST + 10)
+#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
+
+#define LIBCALL_VALUE(MODE) \
+ riscv_function_value (NULL_TREE, NULL_TREE, MODE)
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ riscv_function_value (VALTYPE, FUNC, VOIDmode)
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN)
+
+/* 1 if N is a possible register number for function argument passing.
+ We have no FP argument registers when soft-float. When FP registers
+ are 32 bits, we can't directly reference the odd numbered ones. */
+
+/* Accept arguments in a0-a7 and/or fa0-fa7. */
+#define FUNCTION_ARG_REGNO_P(N) \
+ (IN_RANGE((N), GP_ARG_FIRST, GP_ARG_LAST) \
+ || IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST))
+
+/* The ABI views the arguments as a structure, of which the first 8
+ words go in registers and the rest go on the stack. If I < 8, the Ith
+ word might go in the Ith integer argument register or the Ith
+ floating-point argument register. */
+
+typedef struct {
+ /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
+ unsigned int num_gprs;
+
+ /* Number of words passed on the stack. */
+ unsigned int stack_words;
+} CUMULATIVE_ARGS;
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ memset (&(CUM), 0, sizeof (CUM))
+
+#define EPILOGUE_USES(REGNO) ((REGNO) == RETURN_ADDR_REGNUM)
+
+/* ABI requires 16-byte alignment, even on RV32. */
+#define RISCV_STACK_ALIGN(LOC) (((LOC) + 15) & -16)
+
+#define NO_PROFILE_COUNTERS 1
+
+/* Define this macro if the code for function profiling should come
+ before the function prologue. Normally, the profiling code comes
+ after. */
+
+/* #define PROFILE_BEFORE_PROLOGUE */
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 1
+
+
+/* Trampolines are a block of code followed by two pointers. */
+
+#define TRAMPOLINE_CODE_SIZE 16
+#define TRAMPOLINE_SIZE (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2)
+#define TRAMPOLINE_ALIGNMENT POINTER_SIZE
+
+/* Addressing modes, and classification of registers for them. */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ riscv_regno_mode_ok_for_base_p (REGNO, MODE, 1)
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects them all.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Some source files that are used after register allocation
+ need to be strict. */
+
+#ifndef REG_OK_STRICT
+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
+ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 0)
+#else
+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
+ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 1)
+#endif
+
+#define REG_OK_FOR_INDEX_P(X) 0
+
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (CONSTANT_P (X) && memory_address_p (SImode, X))
+
+/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
+ 'the start of the function that this code is output in'. */
+
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
+ asm_fprintf ((FILE), "%U%s", \
+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
+ else \
+ asm_fprintf ((FILE), "%U%s", (NAME))
+
+/* This flag marks functions that cannot be lazily bound. */
+#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1)
+#define SYMBOL_REF_BIND_NOW_P(RTX) \
+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0)
+
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+#define CASE_VECTOR_MODE SImode
+#define CASE_VECTOR_PC_RELATIVE (riscv_cmodel != CM_MEDLOW)
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Consider using fld/fsd to move 8 bytes at a time for RV32IFD. */
+#define MOVE_MAX UNITS_PER_WORD
+#define MAX_MOVE_MAX 8
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) \
+ (TARGET_64BIT ? ((INPREC) <= 32 || (OUTPREC) < 32) : 1)
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+
+#ifndef Pmode
+#define Pmode (TARGET_64BIT ? DImode : SImode)
+#endif
+
+/* Give call MEMs SImode since it is the "most permissive" mode
+ for both 32-bit and 64-bit targets. */
+
+#define FUNCTION_MODE SImode
+
+/* A C expression for the cost of a branch instruction. A value of 2
+ seems to minimize code size. */
+
+#define BRANCH_COST(speed_p, predictable_p) \
+ ((!(speed_p) || (predictable_p)) ? 2 : riscv_branch_cost)
+
+#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
+
+/* Control the assembler format that we output. */
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#ifndef ASM_APP_ON
+#define ASM_APP_ON " #APP\n"
+#endif
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#ifndef ASM_APP_OFF
+#define ASM_APP_OFF " #NO_APP\n"
+#endif
+
+#define REGISTER_NAMES \
+{ "zero","ra", "sp", "gp", "tp", "t0", "t1", "t2", \
+ "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", \
+ "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", \
+ "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", \
+ "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", \
+ "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", \
+ "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", \
+ "fs8", "fs9", "fs10","fs11","ft8", "ft9", "ft10","ft11", \
+ "arg", "frame", }
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ { "x0", 0 + GP_REG_FIRST }, \
+ { "x1", 1 + GP_REG_FIRST }, \
+ { "x2", 2 + GP_REG_FIRST }, \
+ { "x3", 3 + GP_REG_FIRST }, \
+ { "x4", 4 + GP_REG_FIRST }, \
+ { "x5", 5 + GP_REG_FIRST }, \
+ { "x6", 6 + GP_REG_FIRST }, \
+ { "x7", 7 + GP_REG_FIRST }, \
+ { "x8", 8 + GP_REG_FIRST }, \
+ { "x9", 9 + GP_REG_FIRST }, \
+ { "x10", 10 + GP_REG_FIRST }, \
+ { "x11", 11 + GP_REG_FIRST }, \
+ { "x12", 12 + GP_REG_FIRST }, \
+ { "x13", 13 + GP_REG_FIRST }, \
+ { "x14", 14 + GP_REG_FIRST }, \
+ { "x15", 15 + GP_REG_FIRST }, \
+ { "x16", 16 + GP_REG_FIRST }, \
+ { "x17", 17 + GP_REG_FIRST }, \
+ { "x18", 18 + GP_REG_FIRST }, \
+ { "x19", 19 + GP_REG_FIRST }, \
+ { "x20", 20 + GP_REG_FIRST }, \
+ { "x21", 21 + GP_REG_FIRST }, \
+ { "x22", 22 + GP_REG_FIRST }, \
+ { "x23", 23 + GP_REG_FIRST }, \
+ { "x24", 24 + GP_REG_FIRST }, \
+ { "x25", 25 + GP_REG_FIRST }, \
+ { "x26", 26 + GP_REG_FIRST }, \
+ { "x27", 27 + GP_REG_FIRST }, \
+ { "x28", 28 + GP_REG_FIRST }, \
+ { "x29", 29 + GP_REG_FIRST }, \
+ { "x30", 30 + GP_REG_FIRST }, \
+ { "x31", 31 + GP_REG_FIRST }, \
+ { "f0", 0 + FP_REG_FIRST }, \
+ { "f1", 1 + FP_REG_FIRST }, \
+ { "f2", 2 + FP_REG_FIRST }, \
+ { "f3", 3 + FP_REG_FIRST }, \
+ { "f4", 4 + FP_REG_FIRST }, \
+ { "f5", 5 + FP_REG_FIRST }, \
+ { "f6", 6 + FP_REG_FIRST }, \
+ { "f7", 7 + FP_REG_FIRST }, \
+ { "f8", 8 + FP_REG_FIRST }, \
+ { "f9", 9 + FP_REG_FIRST }, \
+ { "f10", 10 + FP_REG_FIRST }, \
+ { "f11", 11 + FP_REG_FIRST }, \
+ { "f12", 12 + FP_REG_FIRST }, \
+ { "f13", 13 + FP_REG_FIRST }, \
+ { "f14", 14 + FP_REG_FIRST }, \
+ { "f15", 15 + FP_REG_FIRST }, \
+ { "f16", 16 + FP_REG_FIRST }, \
+ { "f17", 17 + FP_REG_FIRST }, \
+ { "f18", 18 + FP_REG_FIRST }, \
+ { "f19", 19 + FP_REG_FIRST }, \
+ { "f20", 20 + FP_REG_FIRST }, \
+ { "f21", 21 + FP_REG_FIRST }, \
+ { "f22", 22 + FP_REG_FIRST }, \
+ { "f23", 23 + FP_REG_FIRST }, \
+ { "f24", 24 + FP_REG_FIRST }, \
+ { "f25", 25 + FP_REG_FIRST }, \
+ { "f26", 26 + FP_REG_FIRST }, \
+ { "f27", 27 + FP_REG_FIRST }, \
+ { "f28", 28 + FP_REG_FIRST }, \
+ { "f29", 29 + FP_REG_FIRST }, \
+ { "f30", 30 + FP_REG_FIRST }, \
+ { "f31", 31 + FP_REG_FIRST }, \
+}
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.globl\t"
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
+
+/* This is how to output an element of a case-vector that is absolute. */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
+
+/* This is how to output an element of a PIC case-vector. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
+ fprintf (STREAM, "\t.word\t%sL%d-%sL%d\n", \
+ LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG))
+
+/* Define the strings to put out for each section in the object file. */
+#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */
+#define DATA_SECTION_ASM_OP "\t.data" /* large data */
+#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata"
+#define BSS_SECTION_ASM_OP "\t.bss"
+#define SBSS_SECTION_ASM_OP "\t.section\t.sbss,\"aw\",@nobits"
+#define SDATA_SECTION_ASM_OP "\t.section\t.sdata,\"aw\",@progbits"
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+do \
+ { \
+ fprintf (STREAM, "\taddi\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \
+ reg_names[STACK_POINTER_REGNUM], \
+ reg_names[STACK_POINTER_REGNUM], \
+ TARGET_64BIT ? "sd" : "sw", \
+ reg_names[REGNO], \
+ reg_names[STACK_POINTER_REGNUM]); \
+ } \
+while (0)
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+do \
+ { \
+ fprintf (STREAM, "\t%s\t%s,0(%s)\n\taddi\t%s,%s,8\n", \
+ TARGET_64BIT ? "ld" : "lw", \
+ reg_names[REGNO], \
+ reg_names[STACK_POINTER_REGNUM], \
+ reg_names[STACK_POINTER_REGNUM], \
+ reg_names[STACK_POINTER_REGNUM]); \
+ } \
+while (0)
+
+#define ASM_COMMENT_START "#"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
+
+/* The maximum number of bytes that can be copied by one iteration of
+ a movmemsi loop; see riscv_block_move_loop. */
+#define RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4)
+
+/* The maximum number of bytes that can be copied by a straight-line
+ implementation of movmemsi; see riscv_block_move_straight. We want
+ to make sure that any loop-based implementation will iterate at
+ least twice. */
+#define RISCV_MAX_MOVE_BYTES_STRAIGHT (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
+
+/* The base cost of a memcpy call, for MOVE_RATIO and friends. */
+
+#define RISCV_CALL_RATIO 6
+
+/* Any loop-based implementation of movmemsi will have at least
+ RISCV_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
+ moves, so allow individual copies of fewer elements.
+
+ When movmemsi is not available, use a value approximating
+ the length of a memcpy call sequence, so that move_by_pieces
+ will generate inline code if it is shorter than a function call.
+ Since move_by_pieces_ninsns counts memory-to-memory moves, but
+ we'll have to generate a load/store pair for each, halve the
+ value of RISCV_CALL_RATIO to take that into account. */
+
+#define MOVE_RATIO(speed) \
+ (HAVE_movmemsi \
+ ? RISCV_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX \
+ : RISCV_CALL_RATIO / 2)
+
+/* movmemsi is meant to generate code that is at least as good as
+ move_by_pieces. However, movmemsi effectively uses a by-pieces
+ implementation both for moves smaller than a word and for word-aligned
+ moves of no more than RISCV_MAX_MOVE_BYTES_STRAIGHT bytes. We should
+ allow the tree-level optimisers to do such moves by pieces, as it
+ often exposes other optimization opportunities. We might as well
+ continue to use movmemsi at the rtl level though, as it produces
+ better code when scheduling is disabled (such as at -O). */
+
+#define MOVE_BY_PIECES_P(SIZE, ALIGN) \
+ (HAVE_movmemsi \
+ ? (!currently_expanding_to_rtl \
+ && ((ALIGN) < BITS_PER_WORD \
+ ? (SIZE) < UNITS_PER_WORD \
+ : (SIZE) <= RISCV_MAX_MOVE_BYTES_STRAIGHT)) \
+ : (move_by_pieces_ninsns (SIZE, ALIGN, MOVE_MAX_PIECES + 1) \
+ < (unsigned int) MOVE_RATIO (false)))
+
+/* For CLEAR_RATIO, when optimizing for size, give a better estimate
+ of the length of a memset call, but use the default otherwise. */
+
+#define CLEAR_RATIO(speed)\
+ ((speed) ? 15 : RISCV_CALL_RATIO)
+
+/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when
+ optimizing for size adjust the ratio to account for the overhead of
+ loading the constant and replicating it across the word. */
+
+#define SET_RATIO(speed) \
+ ((speed) ? 15 : RISCV_CALL_RATIO - 2)
+
+/* STORE_BY_PIECES_P can be used when copying a constant string, but
+ in that case each word takes 3 insns (lui, ori, sw), or more in
+ 64-bit mode, instead of 2 (lw, sw). For now we always fail this
+ and let the move_by_pieces code copy the string from read-only
+ memory. In the future, this could be tuned further for multi-issue
+ CPUs that can issue stores down one pipe and arithmetic instructions
+ down another; in that case, the lui/ori/sw combination would be a
+ win for long enough strings. */
+
+#define STORE_BY_PIECES_P(SIZE, ALIGN) 0
+
+#ifndef HAVE_AS_TLS
+#define HAVE_AS_TLS 0
+#endif
+
+#ifndef USED_FOR_TARGET
+
+extern const enum reg_class riscv_regno_to_class[];
+extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
+extern const char* riscv_hi_relocs[];
+#endif
+
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
+ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
diff -rNU3 dist.orig/gcc/config/riscv/riscv.md dist/gcc/config/riscv/riscv.md
--- dist.orig/gcc/config/riscv/riscv.md 1970-01-01 01:00:00.000000000 +0100
+++ dist/gcc/config/riscv/riscv.md 2015-10-18 13:19:50.000000000 +0200
@@ -0,0 +1,2427 @@
+;; Machine description for RISC-V for GNU compiler.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+;; Based on MIPS target for GNU compiler.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspec" [
+ ;; Floating-point moves.
+ UNSPEC_LOAD_LOW
+ UNSPEC_LOAD_HIGH
+ UNSPEC_STORE_WORD
+
+ ;; GP manipulation.
+ UNSPEC_EH_RETURN
+
+ ;; Symbolic accesses.
+ UNSPEC_ADDRESS_FIRST
+ UNSPEC_LOAD_GOT
+ UNSPEC_TLS
+ UNSPEC_TLS_LE
+ UNSPEC_TLS_IE
+ UNSPEC_TLS_GD
+
+ ;; Blockage and synchronisation.
+ UNSPEC_BLOCKAGE
+ UNSPEC_FENCE
+ UNSPEC_FENCE_I
+])
+
+(define_constants
+ [(RETURN_ADDR_REGNUM 1)
+])
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; ....................
+;;
+;; Attributes
+;;
+;; ....................
+
+(define_attr "got" "unset,xgot_high,load"
+ (const_string "unset"))
+
+;; For jal instructions, this attribute is DIRECT when the target address
+;; is symbolic and INDIRECT when it is a register.
+(define_attr "jal" "unset,direct,indirect"
+ (const_string "unset"))
+
+;; Classification of moves, extensions and truncations. Most values
+;; are as for "type" (see below) but there are also the following
+;; move-specific values:
+;;
+;; andi a single ANDI instruction
+;; shift_shift a shift left followed by a shift right
+;;
+;; This attribute is used to determine the instruction's length and
+;; scheduling type. For doubleword moves, the attribute always describes
+;; the split instructions; in some cases, it is more appropriate for the
+;; scheduling type to be "multi" instead.
+(define_attr "move_type"
+ "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
+ const,logical,arith,andi,shift_shift"
+ (const_string "unknown"))
+
+(define_attr "alu_type" "unknown,add,sub,and,or,xor"
+ (const_string "unknown"))
+
+;; Main data type used by the insn
+(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FPSW"
+ (const_string "unknown"))
+
+;; True if the main data type is twice the size of a word.
+(define_attr "dword_mode" "no,yes"
+ (cond [(and (eq_attr "mode" "DI,DF")
+ (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
+ (const_string "yes")
+
+ (and (eq_attr "mode" "TI,TF")
+ (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
+ (const_string "yes")]
+ (const_string "no")))
+
+;; Classification of each insn.
+;; branch conditional branch
+;; jump unconditional jump
+;; call unconditional call
+;; load load instruction(s)
+;; fpload floating point load
+;; fpidxload floating point indexed load
+;; store store instruction(s)
+;; fpstore floating point store
+;; fpidxstore floating point indexed store
+;; mtc transfer to coprocessor
+;; mfc transfer from coprocessor
+;; const load constant
+;; arith integer arithmetic instructions
+;; logical integer logical instructions
+;; shift integer shift instructions
+;; slt set less than instructions
+;; imul integer multiply
+;; idiv integer divide
+;; move integer register move (addi rd, rs1, 0)
+;; fmove floating point register move
+;; fadd floating point add/subtract
+;; fmul floating point multiply
+;; fmadd floating point multiply-add
+;; fdiv floating point divide
+;; fcmp floating point compare
+;; fcvt floating point convert
+;; fsqrt floating point square root
+;; multi multiword sequence (or user asm statements)
+;; nop no operation
+;; ghost an instruction that produces no real code
+(define_attr "type"
+ "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,
+ mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
+ fmadd,fdiv,fcmp,fcvt,fsqrt,multi,nop,ghost"
+ (cond [(eq_attr "jal" "!unset") (const_string "call")
+ (eq_attr "got" "load") (const_string "load")
+
+ (eq_attr "alu_type" "add,sub") (const_string "arith")
+
+ (eq_attr "alu_type" "and,or,xor") (const_string "logical")
+
+ ;; If a doubleword move uses these expensive instructions,
+ ;; it is usually better to schedule them in the same way
+ ;; as the singleword form, rather than as "multi".
+ (eq_attr "move_type" "load") (const_string "load")
+ (eq_attr "move_type" "fpload") (const_string "fpload")
+ (eq_attr "move_type" "store") (const_string "store")
+ (eq_attr "move_type" "fpstore") (const_string "fpstore")
+ (eq_attr "move_type" "mtc") (const_string "mtc")
+ (eq_attr "move_type" "mfc") (const_string "mfc")
+
+ ;; These types of move are always single insns.
+ (eq_attr "move_type" "fmove") (const_string "fmove")
+ (eq_attr "move_type" "arith") (const_string "arith")
+ (eq_attr "move_type" "logical") (const_string "logical")
+ (eq_attr "move_type" "andi") (const_string "logical")
+
+ ;; These types of move are always split.
+ (eq_attr "move_type" "shift_shift")
+ (const_string "multi")
+
+ ;; These types of move are split for doubleword modes only.
+ (and (eq_attr "move_type" "move,const")
+ (eq_attr "dword_mode" "yes"))
+ (const_string "multi")
+ (eq_attr "move_type" "move") (const_string "move")
+ (eq_attr "move_type" "const") (const_string "const")]
+ (const_string "unknown")))
+
+;; Mode for conversion types (fcvt)
+;; I2S integer to float single (SI/DI to SF)
+;; I2D integer to float double (SI/DI to DF)
+;; S2I float to integer (SF to SI/DI)
+;; D2I float to integer (DF to SI/DI)
+;; D2S double to float single
+;; S2D float single to double
+
+(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D"
+ (const_string "unknown"))
+
+;; Length of instruction in bytes.
+(define_attr "length" ""
+ (cond [
+ ;; Direct branch instructions have a range of [-0x1000,0xffc],
+ ;; relative to the address of the delay slot. If a branch is
+ ;; outside this range, convert a branch like:
+ ;;
+ ;; bne r1,r2,target
+ ;;
+ ;; to:
+ ;;
+ ;; beq r1,r2,1f
+ ;; j target
+ ;; 1:
+ ;;
+ (eq_attr "type" "branch")
+ (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
+ (le (minus (pc) (match_dup 0)) (const_int 4092)))
+ (const_int 4)
+ (const_int 8))
+
+ ;; Conservatively assume calls take two instructions, as in:
+ ;; auipc t0, %pcrel_hi(target)
+ ;; jalr ra, t0, %lo(target)
+ ;; The linker will relax these into JAL when appropriate.
+ (eq_attr "type" "call")
+ (const_int 8)
+
+ ;; "Ghost" instructions occupy no space.
+ (eq_attr "type" "ghost")
+ (const_int 0)
+
+ (eq_attr "got" "load") (const_int 8)
+
+ ;; SHIFT_SHIFTs are decomposed into two separate instructions.
+ (eq_attr "move_type" "shift_shift")
+ (const_int 8)
+
+ ;; Check for doubleword moves that are decomposed into two
+ ;; instructions.
+ (and (eq_attr "move_type" "mtc,mfc,move")
+ (eq_attr "dword_mode" "yes"))
+ (const_int 8)
+
+ ;; Doubleword CONST{,N} moves are split into two word
+ ;; CONST{,N} moves.
+ (and (eq_attr "move_type" "const")
+ (eq_attr "dword_mode" "yes"))
+ (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
+
+ ;; Otherwise, constants, loads and stores are handled by external
+ ;; routines.
+ (eq_attr "move_type" "load,fpload")
+ (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
+ (eq_attr "move_type" "store,fpstore")
+ (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
+ ] (const_int 4)))
+
+;; Describe a user's asm statement.
+(define_asm_attributes
+ [(set_attr "type" "multi")])
+
+;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
+;; from the same template.
+(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
+(define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")])
+
+;; A copy of GPR that can be used when a pattern has two independent
+;; modes.
+(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
+
+;; This mode iterator allows :P to be used for patterns that operate on
+;; pointer-sized quantities. Exactly one of the two alternatives will match.
+(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
+
+;; 32-bit integer moves for which we provide move patterns.
+(define_mode_iterator IMOVE32 [SI])
+
+;; 64-bit modes for which we provide move patterns.
+(define_mode_iterator MOVE64 [DI DF])
+
+;; 128-bit modes for which we provide move patterns on 64-bit targets.
+(define_mode_iterator MOVE128 [TI TF])
+
+;; This mode iterator allows the QI and HI extension patterns to be
+;; defined from the same template.
+(define_mode_iterator SHORT [QI HI])
+
+;; Likewise the 64-bit truncate-and-shift patterns.
+(define_mode_iterator SUBDI [QI HI SI])
+(define_mode_iterator HISI [HI SI])
+(define_mode_iterator ANYI [QI HI SI (DI "TARGET_64BIT")])
+
+;; This mode iterator allows :ANYF to be used wherever a scalar or vector
+;; floating-point mode is allowed.
+(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
+ (DF "TARGET_HARD_FLOAT")])
+(define_mode_iterator ANYIF [QI HI SI (DI "TARGET_64BIT")
+ (SF "TARGET_HARD_FLOAT")
+ (DF "TARGET_HARD_FLOAT")])
+
+;; Like ANYF, but only applies to scalar modes.
+(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT")
+ (DF "TARGET_HARD_FLOAT")])
+
+;; A floating-point mode for which moves involving FPRs may need to be split.
+(define_mode_iterator SPLITF
+ [(DF "!TARGET_64BIT")
+ (DI "!TARGET_64BIT")
+ (TF "TARGET_64BIT")])
+
+;; This attribute gives the length suffix for a sign- or zero-extension
+;; instruction.
+(define_mode_attr size [(QI "b") (HI "h")])
+
+;; Mode attributes for loads.
+(define_mode_attr load [(QI "lb") (HI "lh") (SI "lw") (DI "ld") (SF "flw") (DF "fld")])
+
+;; Instruction names for stores.
+(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd") (SF "fsw") (DF "fsd")])
+
+;; This attribute gives the best constraint to use for registers of
+;; a given mode.
+(define_mode_attr reg [(SI "d") (DI "d") (CC "d")])
+
+;; This attribute gives the format suffix for floating-point operations.
+(define_mode_attr fmt [(SF "s") (DF "d")])
+
+;; This attribute gives the format suffix for atomic memory operations.
+(define_mode_attr amo [(SI "w") (DI "d")])
+
+;; This attribute gives the upper-case mode name for one unit of a
+;; floating-point mode.
+(define_mode_attr UNITMODE [(SF "SF") (DF "DF")])
+
+;; This attribute gives the integer mode that has half the size of
+;; the controlling mode.
+(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
+
+;; This code iterator allows signed and unsigned widening multiplications
+;; to use the same template.
+(define_code_iterator any_extend [sign_extend zero_extend])
+
+;; This code iterator allows the two right shift instructions to be
+;; generated from the same template.
+(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
+
+;; This code iterator allows the three shift instructions to be generated
+;; from the same template.
+(define_code_iterator any_shift [ashift ashiftrt lshiftrt])
+
+;; This code iterator allows unsigned and signed division to be generated
+;; from the same template.
+(define_code_iterator any_div [div udiv])
+
+;; This code iterator allows unsigned and signed modulus to be generated
+;; from the same template.
+(define_code_iterator any_mod [mod umod])
+
+;; These code iterators allow the signed and unsigned scc operations to use
+;; the same template.
+(define_code_iterator any_gt [gt gtu])
+(define_code_iterator any_ge [ge geu])
+(define_code_iterator any_lt [lt ltu])
+(define_code_iterator any_le [le leu])
+
+;; <u> expands to an empty string when doing a signed operation and
+;; "u" when doing an unsigned operation.
+(define_code_attr u [(sign_extend "") (zero_extend "u")
+ (div "") (udiv "u")
+ (mod "") (umod "u")
+ (gt "") (gtu "u")
+ (ge "") (geu "u")
+ (lt "") (ltu "u")
+ (le "") (leu "u")])
+
+;; <su> is like <u>, but the signed form expands to "s" rather than "".
+(define_code_attr su [(sign_extend "s") (zero_extend "u")])
+
+;; <optab> expands to the name of the optab for a particular code.
+(define_code_attr optab [(ashift "ashl")
+ (ashiftrt "ashr")
+ (lshiftrt "lshr")
+ (ior "ior")
+ (xor "xor")
+ (and "and")
+ (plus "add")
+ (minus "sub")])
+
+;; <insn> expands to the name of the insn that implements a particular code.
+(define_code_attr insn [(ashift "sll")
+ (ashiftrt "sra")
+ (lshiftrt "srl")
+ (ior "or")
+ (xor "xor")
+ (and "and")
+ (plus "add")
+ (minus "sub")])
+
+;; Pipeline descriptions.
+;;
+;; generic.md provides a fallback for processors without a specific
+;; pipeline description. It is derived from the old define_function_unit
+;; version and uses the "alu" and "imuldiv" units declared below.
+;;
+;; Some of the processor-specific files are also derived from old
+;; define_function_unit descriptions and simply override the parts of
+;; generic.md that don't apply. The other processor-specific files
+;; are self-contained.
+(define_automaton "alu,imuldiv")
+
+(define_cpu_unit "alu" "alu")
+(define_cpu_unit "imuldiv" "imuldiv")
+
+;; Ghost instructions produce no real code and introduce no hazards.
+;; They exist purely to express an effect on dataflow.
+(define_insn_reservation "ghost" 0
+ (eq_attr "type" "ghost")
+ "nothing")
+
+(include "generic.md")
+
+;;
+;; ....................
+;;
+;; ADDITION
+;;
+;; ....................
+;;
+
+(define_insn "add<mode>3"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")))]
+ ""
+ "fadd.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fadd")
+ (set_attr "mode" "<UNITMODE>")])
+
+(define_expand "add<mode>3"
+ [(set (match_operand:GPR 0 "register_operand")
+ (plus:GPR (match_operand:GPR 1 "register_operand")
+ (match_operand:GPR 2 "arith_operand")))]
+ "")
+
+(define_insn "*addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (match_operand:GPR 1 "register_operand" "r,r")
+ (match_operand:GPR2 2 "arith_operand" "r,Q")))]
+ ""
+ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "*adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
+ (match_operand:DI 2 "arith_operand" "r,Q")))]
+ "TARGET_64BIT"
+ "add\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "DI")])
+
+(define_insn "*addsi3_extended"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI
+ (plus:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "arith_operand" "r,Q"))))]
+ "TARGET_64BIT"
+ "addw\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "*adddisi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
+ (truncate:SI (match_operand:DI 2 "arith_operand" "r,Q"))))]
+ "TARGET_64BIT"
+ "addw\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "*adddisisi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
+ (match_operand:SI 2 "arith_operand" "r,Q")))]
+ "TARGET_64BIT"
+ "addw\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "*adddi3_truncsi"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (truncate:SI
+ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
+ (match_operand:DI 2 "arith_operand" "r,Q"))))]
+ "TARGET_64BIT"
+ "addw\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+;;
+;; ....................
+;;
+;; SUBTRACTION
+;;
+;; ....................
+;;
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")))]
+ ""
+ "fsub.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fadd")
+ (set_attr "mode" "<UNITMODE>")])
+
+(define_expand "sub<mode>3"
+ [(set (match_operand:GPR 0 "register_operand")
+ (minus:GPR (match_operand:GPR 1 "reg_or_0_operand")
+ (match_operand:GPR 2 "register_operand")))]
+ "")
+
+(define_insn "*subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "sub\t%0,%z1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "DI")])
+
+(define_insn "*subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:GPR 1 "reg_or_0_operand" "rJ")
+ (match_operand:GPR2 2 "register_operand" "r")))]
+ ""
+ { return TARGET_64BIT ? "subw\t%0,%z1,%2" : "sub\t%0,%z1,%2"; }
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "*subsi3_extended"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_64BIT"
+ "subw\t%0,%z1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "DI")])
+
+(define_insn "*subdisi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
+ "TARGET_64BIT"
+ "subw\t%0,%z1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "*subdisisi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_64BIT"
+ "subw\t%0,%z1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "*subsidisi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
+ "TARGET_64BIT"
+ "subw\t%0,%z1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+(define_insn "*subdi3_truncsi"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (truncate:SI
+ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,r")
+ (match_operand:DI 2 "arith_operand" "r,Q"))))]
+ "TARGET_64BIT"
+ "subw\t%0,%z1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")])
+
+;;
+;; ....................
+;;
+;; MULTIPLICATION
+;;
+;; ....................
+;;
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:SCALARF 0 "register_operand" "=f")
+ (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f")
+ (match_operand:SCALARF 2 "register_operand" "f")))]
+ ""
+ "fmul.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fmul")
+ (set_attr "mode" "<UNITMODE>")])
+
+(define_expand "mul<mode>3"
+ [(set (match_operand:GPR 0 "register_operand")
+ (mult:GPR (match_operand:GPR 1 "reg_or_0_operand")
+ (match_operand:GPR 2 "register_operand")))]
+ "TARGET_MULDIV")
+
+(define_insn "*mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:GPR 1 "register_operand" "r")
+ (match_operand:GPR2 2 "register_operand" "r")))]
+ "TARGET_MULDIV"
+ { return TARGET_64BIT ? "mulw\t%0,%1,%2" : "mul\t%0,%1,%2"; }
+ [(set_attr "type" "imul")
+ (set_attr "mode" "SI")])
+
+(define_insn "*muldisi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
+ "TARGET_MULDIV && TARGET_64BIT"
+ "mulw\t%0,%1,%2"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "SI")])
+
+(define_insn "*muldi3_truncsi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (mult:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "register_operand" "r"))))]
+ "TARGET_MULDIV && TARGET_64BIT"
+ "mulw\t%0,%1,%2"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "SI")])
+
+(define_insn "*muldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_MULDIV && TARGET_64BIT"
+ "mul\t%0,%1,%2"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "DI")])
+
+;;
+;; ........................
+;;
+;; MULTIPLICATION HIGH-PART
+;;
+;; ........................
+;;
+
+
+;; Using a clobber here is ghetto, but I'm not smart enough to do better. '
+(define_insn_and_split "<u>mulditi3"
+ [(set (match_operand:TI 0 "register_operand" "=r")
+ (mult:TI (any_extend:TI
+ (match_operand:DI 1 "register_operand" "r"))
+ (any_extend:TI
+ (match_operand:DI 2 "register_operand" "r"))))
+ (clobber (match_scratch:DI 3 "=r"))]
+ "TARGET_MULDIV && TARGET_64BIT"
+ "#"
+ "reload_completed"
+ [
+ (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 4) (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (any_extend:TI (match_dup 1))
+ (any_extend:TI (match_dup 2)))
+ (const_int 64))))
+ (set (match_dup 5) (match_dup 3))
+ ]
+{
+ operands[4] = riscv_subword (operands[0], true);
+ operands[5] = riscv_subword (operands[0], false);
+}
+ )
+
+(define_insn "<su>muldi3_highpart"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (any_extend:TI
+ (match_operand:DI 1 "register_operand" "r"))
+ (any_extend:TI
+ (match_operand:DI 2 "register_operand" "r")))
+ (const_int 64))))]
+ "TARGET_MULDIV && TARGET_64BIT"
+ "mulh<u>\t%0,%1,%2"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "DI")])
+
+
+(define_insn_and_split "usmulditi3"
+ [(set (match_operand:TI 0 "register_operand" "=r")
+ (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "register_operand" "r"))
+ (sign_extend:TI
+ (match_operand:DI 2 "register_operand" "r"))))
+ (clobber (match_scratch:DI 3 "=r"))]
+ "TARGET_MULDIV && TARGET_64BIT"
+ "#"
+ "reload_completed"
+ [
+ (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 4) (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (const_int 64))))
+ (set (match_dup 5) (match_dup 3))
+ ]
+{
+ operands[4] = riscv_subword (operands[0], true);
+ operands[5] = riscv_subword (operands[0], false);
+}
+ )
+
+(define_insn "usmuldi3_highpart"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "register_operand" "r"))
+ (sign_extend:TI
+ (match_operand:DI 2 "register_operand" "r")))
+ (const_int 64))))]
+ "TARGET_MULDIV && TARGET_64BIT"
+ "mulhsu\t%0,%2,%1"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "DI")])
+
+(define_expand "<u>mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (any_extend:DI
+ (match_operand:SI 1 "register_operand" "r"))
+ (any_extend:DI
+ (match_operand:SI 2 "register_operand" "r"))))
+ (clobber (match_scratch:SI 3 "=r"))]
+ "TARGET_MULDIV && !TARGET_64BIT"
+{
+ rtx temp = gen_reg_rtx (SImode);
+ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
+ emit_insn (gen_mulsi3_highpart (riscv_subword (operands[0], true),
+ operands[1], operands[2]));
+ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
+ DONE;
+}
+ )
+
+(define_insn "<su>mulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (any_extend:DI
+ (match_operand:SI 1 "register_operand" "r"))
+ (any_extend:DI
+ (match_operand:SI 2 "register_operand" "r")))
+ (const_int 32))))]
+ "TARGET_MULDIV && !TARGET_64BIT"
+ "mulh<u>\t%0,%1,%2"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "SI")])
+
+
+(define_expand "usmulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "register_operand" "r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "register_operand" "r"))))
+ (clobber (match_scratch:SI 3 "=r"))]
+ "TARGET_MULDIV && !TARGET_64BIT"
+{
+ rtx temp = gen_reg_rtx (SImode);
+ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
+ emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
+ operands[1], operands[2]));
+ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
+ DONE;
+}
+ )
+
+(define_insn "usmulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "register_operand" "r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "register_operand" "r")))
+ (const_int 32))))]
+ "TARGET_MULDIV && !TARGET_64BIT"
+ "mulhsu\t%0,%2,%1"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "SI")])
+
+;;
+;; ....................
+;;
+;; DIVISION and REMAINDER
+;;
+;; ....................
+;;
+
+(define_insn "<u>divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (any_div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_MULDIV"
+ { return TARGET_64BIT ? "div<u>w\t%0,%1,%2" : "div<u>\t%0,%1,%2"; }
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "SI")])
+
+(define_insn "<u>divdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (any_div:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_MULDIV && TARGET_64BIT"
+ "div\t%0,%1,%2"
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "DI")])
+
+(define_insn "modsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (any_mod:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_MULDIV"
+ { return TARGET_64BIT ? "remw\t%0,%1,%2" : "rem\t%0,%1,%2"; }
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "SI")])
+
+(define_insn "moddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (any_mod:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_MULDIV && TARGET_64BIT"
+ "rem\t%0,%1,%2"
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "DI")])
+
+;; FP divide over ANYF (SF/DF); <fmt> expands to the s/d format suffix,
+;; <UNITMODE> to the element mode for the "mode" attribute.
+(define_insn "div<mode>3"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (div:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FDIV"
+ "fdiv.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fdiv")
+ (set_attr "mode" "<UNITMODE>")])
+
+;;
+;; ....................
+;;
+;; SQUARE ROOT
+;;
+;; ....................
+
+;; FP square root; gated on TARGET_FDIV since fsqrt shares the divider.
+(define_insn "sqrt<mode>2"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FDIV"
+{
+ return "fsqrt.<fmt>\t%0,%1";
+}
+ [(set_attr "type" "fsqrt")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; Floating point multiply accumulate instructions.
+
+;; a*b+c -> fmadd
+(define_insn "fma<mode>4"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (fma:ANYF
+ (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")
+ (match_operand:ANYF 3 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fmadd.<fmt>\t%0,%1,%2,%3"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; a*b-c -> fmsub
+(define_insn "fms<mode>4"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (fma:ANYF
+ (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")
+ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "fmsub.<fmt>\t%0,%1,%2,%3"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; -(a*b+c) -> fnmadd
+(define_insn "nfma<mode>4"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (neg:ANYF
+ (fma:ANYF
+ (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")
+ (match_operand:ANYF 3 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "fnmadd.<fmt>\t%0,%1,%2,%3"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; -(a*b-c) -> fnmsub
+(define_insn "nfms<mode>4"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (neg:ANYF
+ (fma:ANYF
+ (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")
+ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))]
+ "TARGET_HARD_FLOAT"
+ "fnmsub.<fmt>\t%0,%1,%2,%3"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; modulo signed zeros, -(a*b+c) == -c-a*b
+(define_insn "*nfma<mode>4_fastmath"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (minus:ANYF
+ (match_operand:ANYF 3 "register_operand" "f")
+ (mult:ANYF
+ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
+ (match_operand:ANYF 2 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
+ "fnmadd.<fmt>\t%0,%1,%2,%3"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; modulo signed zeros, -(a*b-c) == c-a*b
+(define_insn "*nfms<mode>4_fastmath"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (minus:ANYF
+ (match_operand:ANYF 3 "register_operand" "f")
+ (mult:ANYF
+ (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
+ "fnmsub.<fmt>\t%0,%1,%2,%3"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "<UNITMODE>")])
+
+;;
+;; ....................
+;;
+;; ABSOLUTE VALUE
+;;
+;; ....................
+
+;; FP absolute value (sign-injection instruction, hence type "fmove").
+(define_insn "abs<mode>2"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fabs.<fmt>\t%0,%1"
+ [(set_attr "type" "fmove")
+ (set_attr "mode" "<UNITMODE>")])
+
+
+;;
+;; ....................
+;;
+;; MIN/MAX
+;;
+;; ....................
+
+;; IEEE min/max; fmin/fmax map directly onto the smin/smax RTL codes.
+(define_insn "smin<mode>3"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fmin.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fmove")
+ (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "smax<mode>3"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fmax.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fmove")
+ (set_attr "mode" "<UNITMODE>")])
+
+
+;;
+;; ....................
+;;
+;; NEGATION and ONE'S COMPLEMENT '
+;;
+;; ....................
+
+;; FP negation (sign-injection, type "fmove").
+(define_insn "neg<mode>2"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fneg.<fmt>\t%0,%1"
+ [(set_attr "type" "fmove")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; Integer one's complement ("not" assembler pseudo: xori rd,rs,-1).
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:GPR 0 "register_operand" "=r")
+ (not:GPR (match_operand:GPR 1 "register_operand" "r")))]
+ ""
+ "not\t%0,%1"
+ [(set_attr "type" "logical")
+ (set_attr "mode" "<MODE>")])
+
+;;
+;; ....................
+;;
+;; LOGICAL
+;;
+;; ....................
+;;
+
+;; Bitwise ops over GPR; the second alternative (constraint Q) relies on
+;; the assembler choosing the immediate form of the mnemonic.
+(define_insn "and<mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
+ (and:GPR (match_operand:GPR 1 "register_operand" "%r,r")
+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
+ ""
+ "and\t%0,%1,%2"
+ [(set_attr "type" "logical")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "ior<mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
+ (ior:GPR (match_operand:GPR 1 "register_operand" "%r,r")
+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
+ ""
+ "or\t%0,%1,%2"
+ [(set_attr "type" "logical")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "xor<mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
+ (xor:GPR (match_operand:GPR 1 "register_operand" "%r,r")
+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
+ ""
+ "xor\t%0,%1,%2"
+ [(set_attr "type" "logical")
+ (set_attr "mode" "<MODE>")])
+
+;;
+;; ....................
+;;
+;; TRUNCATION
+;;
+;; ....................
+
+;; Narrow double to single precision.
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fcvt.s.d\t%0,%1"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "D2S")
+ (set_attr "mode" "SF")])
+
+;; Integer truncation patterns. Truncating to HImode/QImode is a no-op.
+;; Truncating from DImode to SImode is not, because we always keep SImode
+;; values sign-extended in a register so we can safely use DImode branches
+;; and comparisons on SImode values.
+
+;; DI->SI truncation is a real operation here: sext.w re-canonicalizes the
+;; register so the sign-extended-SImode invariant described above holds,
+;; while the store alternative just writes the low word.
+(define_insn "truncdisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
+ (truncate:SI (match_operand:DI 1 "register_operand" "r,r")))]
+ "TARGET_64BIT"
+ "@
+ sext.w\t%0,%1
+ sw\t%1,%0"
+ [(set_attr "move_type" "arith,store")
+ (set_attr "mode" "SI")])
+
+;; Combiner patterns to optimize shift/truncate combinations.
+
+;; Arithmetic right shift by 32..63 followed by truncation: a single sra
+;; does both and leaves the SUBDI result properly sign-extended.
+(define_insn "*ashr_trunc<mode>"
+ [(set (match_operand:SUBDI 0 "register_operand" "=r")
+ (truncate:SUBDI
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "const_arith_operand" ""))))]
+ "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)"
+ "sra\t%0,%1,%2"
+ [(set_attr "type" "shift")
+ (set_attr "mode" "<MODE>")])
+
+;; Logical shift right by exactly 32 then truncate.  sra (not srl) is used
+;; so the result is sign-extended; the bits where sra and srl differ are
+;; exactly the ones the truncation discards.
+(define_insn "*lshr32_trunc<mode>"
+ [(set (match_operand:SUBDI 0 "register_operand" "=r")
+ (truncate:SUBDI
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 32))))]
+ "TARGET_64BIT"
+ "sra\t%0,%1,32"
+ [(set_attr "type" "shift")
+ (set_attr "mode" "<MODE>")])
+
+;;
+;; ....................
+;;
+;; ZERO EXTENSION
+;;
+;; ....................
+
+;; Extension insns.
+
+;; Register zero-extension has no single instruction in RV64I, so the
+;; register alternative splits after reload into sll 32 / srl 32; memory
+;; sources use lwu directly.
+(define_insn_and_split "zero_extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,W")))]
+ "TARGET_64BIT"
+ "@
+ #
+ lwu\t%0,%1"
+ "&& reload_completed && REG_P (operands[1])"
+ [(set (match_dup 0)
+ (ashift:DI (match_dup 1) (const_int 32)))
+ (set (match_dup 0)
+ (lshiftrt:DI (match_dup 0) (const_int 32)))]
+ { operands[1] = gen_lowpart (DImode, operands[1]); }
+ [(set_attr "move_type" "shift_shift,load")
+ (set_attr "mode" "DI")])
+
+;; Combine is not allowed to convert this insn into a zero_extendsidi2
+;; because of TRULY_NOOP_TRUNCATION.
+
+;; (and x 0xffffffff) is the combine-visible spelling of a 32-bit
+;; zero-extension: split registers into sll/srl after reload, load memory
+;; with lwu.
+(define_insn_and_split "*clear_upper32"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (and:DI (match_operand:DI 1 "nonimmediate_operand" "r,W")
+ (const_int 4294967295)))]
+ "TARGET_64BIT"
+{
+ if (which_alternative == 0)
+ return "#";
+
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ return "lwu\t%0,%1";
+}
+ "&& reload_completed && REG_P (operands[1])"
+ [(set (match_dup 0)
+ (ashift:DI (match_dup 1) (const_int 32)))
+ (set (match_dup 0)
+ (lshiftrt:DI (match_dup 0) (const_int 32)))]
+ ""
+ [(set_attr "move_type" "shift_shift,load")
+ (set_attr "mode" "DI")])
+
+;; HI zero-extension: lhu from memory, shift pair (bitsize-16 left then
+;; right) for registers after reload.
+(define_insn_and_split "zero_extendhi<GPR:mode>2"
+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
+ (zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ #
+ lhu\t%0,%1"
+ "&& reload_completed && REG_P (operands[1])"
+ [(set (match_dup 0)
+ (ashift:GPR (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
+ {
+ operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
+ operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
+ }
+ [(set_attr "move_type" "shift_shift,load")
+ (set_attr "mode" "<GPR:MODE>")])
+
+;; QI zero-extension fits in one instruction either way: andi 0xff for
+;; registers, lbu for memory.
+(define_insn "zero_extendqi<SUPERQI:mode>2"
+ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
+ (zero_extend:SUPERQI
+ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ and\t%0,%1,0xff
+ lbu\t%0,%1"
+ [(set_attr "move_type" "andi,load")
+ (set_attr "mode" "<SUPERQI:MODE>")])
+
+;;
+;; ....................
+;;
+;; SIGN EXTENSION
+;;
+;; ....................
+
+;; Extension insns.
+;; Those for integer source operand are ordered widest source type first.
+
+;; When TARGET_64BIT, all SImode integer registers should already be in
+;; sign-extended form (see TRULY_NOOP_TRUNCATION and truncdisi2). We can
+;; therefore get rid of register->register instructions if we constrain
+;; the source to be in the same register as the destination.
+;;
+;; The register alternative has type "arith" so that the pre-reload
+;; scheduler will treat it as a move. This reflects what happens if
+;; the register alternative needs a reload.
+;; SI->DI sign extension: registers already hold sign-extended SImode
+;; values (see the comment above), so the register alternative splits to a
+;; plain move — or to nothing at all when source and destination coincide.
+(define_insn_and_split "extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_64BIT"
+ "@
+ #
+ lw\t%0,%1"
+ "&& reload_completed && register_operand (operands[1], VOIDmode)"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ emit_note (NOTE_INSN_DELETED);
+ DONE;
+ }
+ operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
+}
+ [(set_attr "move_type" "move,load")
+ (set_attr "mode" "DI")])
+
+;; QI/HI sign extension: lb/lh (via <SHORT:size>) from memory, an
+;; sll/sra shift pair over SImode for registers after reload.
+(define_insn_and_split "extend<SHORT:mode><SUPERQI:mode>2"
+ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
+ (sign_extend:SUPERQI
+ (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ #
+ l<SHORT:size>\t%0,%1"
+ "&& reload_completed && REG_P (operands[1])"
+ [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
+{
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
+ - GET_MODE_BITSIZE (<SHORT:MODE>mode));
+}
+ [(set_attr "move_type" "shift_shift,load")
+ (set_attr "mode" "SI")])
+
+;; Widen single to double precision.
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fcvt.d.s\t%0,%1"
+ [(set_attr "type" "fcvt")
+ (set_attr "cnv_mode" "S2D")
+ (set_attr "mode" "DF")])
+
+;;
+;; ....................
+;;
+;; CONVERSIONS
+;;
+;; ....................
+
+;; FP->int conversions truncate toward zero, hence the explicit rtz
+;; rounding-mode argument on every fcvt below.
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (fix:SI (match_operand:DF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fcvt.w.d %0,%1,rtz"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "DF")
+ (set_attr "cnv_mode" "D2I")])
+
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (fix:SI (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fcvt.w.s %0,%1,rtz"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")
+ (set_attr "cnv_mode" "S2I")])
+
+
+(define_insn "fix_truncdfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (fix:DI (match_operand:DF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
+ "fcvt.l.d %0,%1,rtz"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "DF")
+ (set_attr "cnv_mode" "D2I")])
+
+
+(define_insn "fix_truncsfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (fix:DI (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
+ "fcvt.l.s %0,%1,rtz"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")
+ (set_attr "cnv_mode" "S2I")])
+
+
+;; int->FP conversions; %z1 substitutes the zero register for a 0 operand.
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
+ "TARGET_HARD_FLOAT"
+ "fcvt.d.w\t%0,%z1"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "DF")
+ (set_attr "cnv_mode" "I2D")])
+
+
+(define_insn "floatdidf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
+ "fcvt.d.l\t%0,%z1"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "DF")
+ (set_attr "cnv_mode" "I2D")])
+
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
+ "TARGET_HARD_FLOAT"
+ "fcvt.s.w\t%0,%z1"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")
+ (set_attr "cnv_mode" "I2S")])
+
+
+(define_insn "floatdisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
+ "fcvt.s.l\t%0,%z1"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")
+ (set_attr "cnv_mode" "I2S")])
+
+
+(define_insn "floatunssidf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unsigned_float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
+ "TARGET_HARD_FLOAT"
+ "fcvt.d.wu\t%0,%z1"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "DF")
+ (set_attr "cnv_mode" "I2D")])
+
+
+(define_insn "floatunsdidf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (unsigned_float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
+ "fcvt.d.lu\t%0,%z1"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "DF")
+ (set_attr "cnv_mode" "I2D")])
+
+
+(define_insn "floatunssisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unsigned_float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
+ "TARGET_HARD_FLOAT"
+ "fcvt.s.wu\t%0,%z1"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")
+ (set_attr "cnv_mode" "I2S")])
+
+
+(define_insn "floatunsdisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unsigned_float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
+ "fcvt.s.lu\t%0,%z1"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")
+ (set_attr "cnv_mode" "I2S")])
+
+
+;; Unsigned FP->int conversions, again truncating (rtz).
+(define_insn "fixuns_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unsigned_fix:SI (match_operand:DF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fcvt.wu.d %0,%1,rtz"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "DF")
+ (set_attr "cnv_mode" "D2I")])
+
+
+(define_insn "fixuns_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unsigned_fix:SI (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fcvt.wu.s %0,%1,rtz"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")
+ (set_attr "cnv_mode" "S2I")])
+
+
+(define_insn "fixuns_truncdfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unsigned_fix:DI (match_operand:DF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
+ "fcvt.lu.d %0,%1,rtz"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "DF")
+ (set_attr "cnv_mode" "D2I")])
+
+
+(define_insn "fixuns_truncsfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unsigned_fix:DI (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
+ "fcvt.lu.s %0,%1,rtz"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")
+ (set_attr "cnv_mode" "S2I")])
+
+;;
+;; ....................
+;;
+;; DATA MOVEMENT
+;;
+;; ....................
+
+;; Lower-level instructions for loading an address from the GOT.
+;; We could use MEMs, but an unspec gives more optimization
+;; opportunities.
+
+(define_insn "got_load<mode>"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
+ UNSPEC_LOAD_GOT))]
+ "flag_pic"
+ "la\t%0,%1"
+ [(set_attr "got" "load")
+ (set_attr "mode" "<MODE>")])
+
+;; Local-exec TLS: add thread pointer + offset via the %tprel_add
+;; relocation on operand 3.
+(define_insn "tls_add_tp_le<mode>"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "register_operand" "r")
+ (match_operand:P 2 "register_operand" "r")
+ (match_operand:P 3 "symbolic_operand" "")]
+ UNSPEC_TLS_LE))]
+ "!flag_pic || flag_pie"
+ "add\t%0,%1,%2,%%tprel_add(%3)"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "got_load_tls_gd<mode>"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
+ UNSPEC_TLS_GD))]
+ "flag_pic"
+ "la.tls.gd\t%0,%1"
+ [(set_attr "got" "load")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "got_load_tls_ie<mode>"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
+ UNSPEC_TLS_IE))]
+ "flag_pic"
+ "la.tls.ie\t%0,%1"
+ [(set_attr "got" "load")
+ (set_attr "mode" "<MODE>")])
+
+;; Instructions for adding the low 12 bits of an address to a register.
+;; Operand 2 is the address: riscv_print_operand works out which relocation
+;; should be applied.
+
+(define_insn "*low<mode>"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (lo_sum:P (match_operand:P 1 "register_operand" "r")
+ (match_operand:P 2 "immediate_operand" "")))]
+ ""
+ "add\t%0,%1,%R2"
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "<MODE>")])
+
+;; Allow combine to split complex const_int load sequences, using operand 2
+;; to store the intermediate results. See move_operand for details.
+;; Allow combine to split complex const_int load sequences, using operand 2
+;; as the scratch register for the intermediate results (riscv_move_integer
+;; takes (temp, dest, value)).
+(define_split
+ [(set (match_operand:GPR 0 "register_operand")
+ (match_operand:GPR 1 "splittable_const_int_operand"))
+ (clobber (match_operand:GPR 2 "register_operand"))]
+ ""
+ [(const_int 0)]
+{
+ riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]));
+ DONE;
+})
+
+;; Likewise, for symbolic operands; the first riscv_split_symbol call (in
+;; the condition) only tests feasibility, the second produces operand 3.
+(define_split
+ [(set (match_operand:P 0 "register_operand")
+ (match_operand:P 1))
+ (clobber (match_operand:P 2 "register_operand"))]
+ "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
+ [(set (match_dup 0) (match_dup 3))]
+{
+ riscv_split_symbol (operands[2], operands[1],
+ MAX_MACHINE_MODE, &operands[3]);
+})
+
+;; 64-bit integer moves
+
+;; Unlike most other insns, the move insns can't be split with '
+;; different predicates, because register spilling and other parts of
+;; the compiler, have memoized the insn number already.
+
+;; DImode moves; riscv_legitimize_move handles anything that is not a
+;; straightforward register/memory/constant case.
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "")
+ (match_operand:DI 1 ""))]
+ ""
+{
+ if (riscv_legitimize_move (DImode, operands[0], operands[1]))
+ DONE;
+})
+
+;; On RV32 a DImode value occupies a register pair; the actual splitting
+;; into word moves happens in the MOVE64 define_split further down.
+(define_insn "*movdi_32bit"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
+ (match_operand:DI 1 "move_operand" "r,i,m,r,*J*r,*m,*f,*f"))]
+ "!TARGET_64BIT
+ && (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
+ { return riscv_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
+ (set_attr "mode" "DI")])
+
+(define_insn "*movdi_64bit"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
+ (match_operand:DI 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
+ "TARGET_64BIT
+ && (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
+ { return riscv_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
+ (set_attr "mode" "DI")])
+
+;; 32-bit Integer moves
+
+;; Unlike most other insns, the move insns can't be split with
+;; different predicates, because register spilling and other parts of
+;; the compiler, have memoized the insn number already.
+
+(define_expand "mov<mode>"
+ [(set (match_operand:IMOVE32 0 "")
+ (match_operand:IMOVE32 1 ""))]
+ ""
+{
+ if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
+ DONE;
+})
+
+;; The difference between these two is whether or not ints are allowed
+;; in FP registers (off by default, use -mdebugh to enable).
+
+(define_insn "*mov<mode>_internal"
+ [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
+ (match_operand:IMOVE32 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
+ "(register_operand (operands[0], <MODE>mode)
+ || reg_or_0_operand (operands[1], <MODE>mode))"
+ { return riscv_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
+ (set_attr "mode" "SI")])
+
+;; 16-bit Integer moves
+
+;; Unlike most other insns, the move insns can't be split with
+;; different predicates, because register spilling and other parts of
+;; the compiler, have memoized the insn number already.
+;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
+
+;; HImode moves.
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "")
+ (match_operand:HI 1 ""))]
+ ""
+{
+ if (riscv_legitimize_move (HImode, operands[0], operands[1]))
+ DONE;
+})
+
+(define_insn "*movhi_internal"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
+ (match_operand:HI 1 "move_operand" "r,T,m,rJ,*r*J,*f"))]
+ "(register_operand (operands[0], HImode)
+ || reg_or_0_operand (operands[1], HImode))"
+ { return riscv_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
+ (set_attr "mode" "HI")])
+
+;; HImode constant generation; see riscv_move_integer for details.
+;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION.
+
+(define_insn "addhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (plus:HI (match_operand:HISI 1 "register_operand" "r,r")
+ (match_operand:HISI 2 "arith_operand" "r,Q")))]
+ ""
+ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
+ [(set_attr "type" "arith")
+ (set_attr "mode" "HI")])
+
+(define_insn "xorhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (xor:HI (match_operand:HISI 1 "register_operand" "r,r")
+ (match_operand:HISI 2 "arith_operand" "r,Q")))]
+ ""
+ "xor\t%0,%1,%2"
+ [(set_attr "type" "logical")
+ (set_attr "mode" "HI")])
+
+;; 8-bit Integer moves
+
+;; QImode moves.
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "")
+ (match_operand:QI 1 ""))]
+ ""
+{
+ if (riscv_legitimize_move (QImode, operands[0], operands[1]))
+ DONE;
+})
+
+(define_insn "*movqi_internal"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
+ (match_operand:QI 1 "move_operand" "r,I,m,rJ,*r*J,*f"))]
+ "(register_operand (operands[0], QImode)
+ || reg_or_0_operand (operands[1], QImode))"
+ { return riscv_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
+ (set_attr "mode" "QI")])
+
+;; 32-bit floating point moves
+
+;; SFmode moves; constraint G is floating-point zero.
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "")
+ (match_operand:SF 1 ""))]
+ ""
+{
+ if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
+ DONE;
+})
+
+(define_insn "*movsf_hardfloat"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
+ (match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
+ "TARGET_HARD_FLOAT
+ && (register_operand (operands[0], SFmode)
+ || reg_or_0_operand (operands[1], SFmode))"
+ { return riscv_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
+ (set_attr "mode" "SF")])
+
+;; Soft-float SF values live in GPRs only.
+(define_insn "*movsf_softfloat"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:SF 1 "move_operand" "Gr,m,r"))]
+ "TARGET_SOFT_FLOAT
+ && (register_operand (operands[0], SFmode)
+ || reg_or_0_operand (operands[1], SFmode))"
+ { return riscv_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,load,store")
+ (set_attr "mode" "SF")])
+
+;; 64-bit floating point moves
+
+;; DFmode moves.
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "")
+ (match_operand:DF 1 ""))]
+ ""
+{
+ if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
+ DONE;
+})
+
+;; In RV32, we lack mtf.d/mff.d. Go through memory instead.
+;; (except for moving a constant 0 to an FPR. for that we use fcvt.d.w.)
+(define_insn "*movdf_hardfloat_rv32"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*r,*r,*m")
+ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r*G,*m,*r"))]
+ "!TARGET_64BIT && TARGET_HARD_FLOAT
+ && (register_operand (operands[0], DFmode)
+ || reg_or_0_operand (operands[1], DFmode))"
+ { return riscv_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,move,load,store")
+ (set_attr "mode" "DF")])
+
+;; RV64 has direct FPR<->GPR moves for DFmode, hence the extra
+;; mtc/mfc alternatives compared to the RV32 pattern.
+(define_insn "*movdf_hardfloat_rv64"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
+ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))]
+ "TARGET_64BIT && TARGET_HARD_FLOAT
+ && (register_operand (operands[0], DFmode)
+ || reg_or_0_operand (operands[1], DFmode))"
+ { return riscv_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
+ (set_attr "mode" "DF")])
+
+(define_insn "*movdf_softfloat"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:DF 1 "move_operand" "rG,m,rG"))]
+ "TARGET_SOFT_FLOAT
+ && (register_operand (operands[0], DFmode)
+ || reg_or_0_operand (operands[1], DFmode))"
+ { return riscv_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,load,store")
+ (set_attr "mode" "DF")])
+
+;; 128-bit integer moves
+
+;; TImode moves (RV64 only; a TI value is a register pair).
+(define_expand "movti"
+ [(set (match_operand:TI 0)
+ (match_operand:TI 1))]
+ "TARGET_64BIT"
+{
+ if (riscv_legitimize_move (TImode, operands[0], operands[1]))
+ DONE;
+})
+
+;; Always emitted as "#": the MOVE128 split below breaks it into word
+;; moves after reload.
+(define_insn "*movti"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,r,m")
+ (match_operand:TI 1 "move_operand" "r,i,m,rJ"))]
+ "TARGET_64BIT
+ && (register_operand (operands[0], TImode)
+ || reg_or_0_operand (operands[1], TImode))"
+ "#"
+ [(set_attr "move_type" "move,const,load,store")
+ (set_attr "mode" "TI")])
+
+;; Split doubleword moves into their word-sized halves after reload.
+(define_split
+ [(set (match_operand:MOVE64 0 "nonimmediate_operand")
+ (match_operand:MOVE64 1 "move_operand"))]
+ "reload_completed && !TARGET_64BIT
+ && riscv_split_64bit_move_p (operands[0], operands[1])"
+ [(const_int 0)]
+{
+ riscv_split_doubleword_move (operands[0], operands[1]);
+ DONE;
+})
+
+(define_split
+ [(set (match_operand:MOVE128 0 "nonimmediate_operand")
+ (match_operand:MOVE128 1 "move_operand"))]
+ "TARGET_64BIT && reload_completed"
+ [(const_int 0)]
+{
+ riscv_split_doubleword_move (operands[0], operands[1]);
+ DONE;
+})
+
+;; 64-bit paired-single floating point moves
+
+;; Load the low word of operand 0 with operand 1.
+(define_insn "load_low<mode>"
+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
+ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")]
+ UNSPEC_LOAD_LOW))]
+ "TARGET_HARD_FLOAT"
+{
+ operands[0] = riscv_subword (operands[0], 0);
+ return riscv_output_move (operands[0], operands[1]);
+}
+ [(set_attr "move_type" "mtc,fpload")
+ (set_attr "mode" "<HALFMODE>")])
+
+;; Load the high word of operand 0 from operand 1, preserving the value
+;; in the low word.
+(define_insn "load_high<mode>"
+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
+ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")
+ (match_operand:SPLITF 2 "register_operand" "0,0")]
+ UNSPEC_LOAD_HIGH))]
+ "TARGET_HARD_FLOAT"
+{
+ operands[0] = riscv_subword (operands[0], 1);
+ return riscv_output_move (operands[0], operands[1]);
+}
+ [(set_attr "move_type" "mtc,fpload")
+ (set_attr "mode" "<HALFMODE>")])
+
+;; Store one word of operand 1 in operand 0. Operand 2 is 1 to store the
+;; high word and 0 to store the low word.
+(define_insn "store_word<mode>"
+ [(set (match_operand:<HALFMODE> 0 "nonimmediate_operand" "=r,m")
+ (unspec:<HALFMODE> [(match_operand:SPLITF 1 "register_operand" "f,f")
+ (match_operand 2 "const_int_operand")]
+ UNSPEC_STORE_WORD))]
+ "TARGET_HARD_FLOAT"
+{
+ operands[1] = riscv_subword (operands[1], INTVAL (operands[2]));
+ return riscv_output_move (operands[0], operands[1]);
+}
+ [(set_attr "move_type" "mfc,fpstore")
+ (set_attr "mode" "<HALFMODE>")])
+
+;; Expand in-line code to clear the instruction cache between operand[0] and
+;; operand[1].
+;; clear_cache ignores the address range and just emits a fence.i, which
+;; synchronizes the whole instruction stream on this hart.
+(define_expand "clear_cache"
+ [(match_operand 0 "pmode_register_operand")
+ (match_operand 1 "pmode_register_operand")]
+ ""
+ "
+{
+ emit_insn(gen_fence_i());
+ DONE;
+}")
+
+;; NOTE(review): "%|" and "%-" look like riscv_print_operand escapes that
+;; decorate the fence according to the memory model — confirm in riscv.c.
+(define_insn "fence"
+ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE)]
+ ""
+ "%|fence%-")
+
+(define_insn "fence_i"
+ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE_I)]
+ ""
+ "fence.i")
+
+;; Block moves, see riscv.c for more details.
+;; Argument 0 is the destination
+;; Argument 1 is the source
+;; Argument 2 is the length
+;; Argument 3 is the alignment
+
+;; Inline block move; disabled under -mmemcpy so the libcall is used
+;; instead.  FAIL likewise falls back to memcpy.
+(define_expand "movmemsi"
+ [(parallel [(set (match_operand:BLK 0 "general_operand")
+ (match_operand:BLK 1 "general_operand"))
+ (use (match_operand:SI 2 ""))
+ (use (match_operand:SI 3 "const_int_operand"))])]
+ "!TARGET_MEMCPY"
+{
+ if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
+ DONE;
+ else
+ FAIL;
+})
+
+;;
+;; ....................
+;;
+;; SHIFTS
+;;
+;; ....................
+
+;; SImode shifts: <optab>/<insn> map ashift/ashiftrt/lshiftrt to
+;; sll/sra/srl; RV64 uses the word forms.  Shift counts are masked to the
+;; mode width, matching the hardware behavior.
+(define_insn "<optab>si3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2])
+ & (GET_MODE_BITSIZE (SImode) - 1));
+
+ return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "SI")])
+
+(define_insn "*<optab>disi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (any_shift:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
+ (truncate:SI (match_operand:DI 2 "arith_operand" "rI"))))]
+ "TARGET_64BIT"
+ "<insn>w\t%0,%1,%2"
+ [(set_attr "type" "shift")
+ (set_attr "mode" "SI")])
+
+;; sllw is usable for a DI shift-then-truncate only when the count is < 32.
+(define_insn "*ashldi3_truncsi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "const_arith_operand" "I"))))]
+ "TARGET_64BIT && INTVAL (operands[2]) < 32"
+ "sllw\t%0,%1,%2"
+ [(set_attr "type" "shift")
+ (set_attr "mode" "SI")])
+
+(define_insn "*ashldisi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (match_operand:GPR 1 "register_operand" "r")
+ (match_operand:GPR2 2 "arith_operand" "rI")))]
+ "TARGET_64BIT && (GET_CODE (operands[2]) == CONST_INT ? INTVAL (operands[2]) < 32 : 1)"
+ "sllw\t%0,%1,%2"
+ [(set_attr "type" "shift")
+ (set_attr "mode" "SI")])
+
+(define_insn "<optab>di3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (any_shift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_operand" "rI")))]
+ "TARGET_64BIT"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2])
+ & (GET_MODE_BITSIZE (DImode) - 1));
+
+ return "<insn>\t%0,%1,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "DI")])
+
+;; Word shifts sign-extend their result, so the sign_extend wrapper is free.
+(define_insn "<optab>si3_extend"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI
+ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))))]
+ "TARGET_64BIT"
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+
+ return "<insn>w\t%0,%1,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "SI")])
+
+;;
+;; ....................
+;;
+;; CONDITIONAL BRANCHES
+;;
+;; ....................
+
+;; Conditional branches
+
+(define_insn "*branch_order<mode>"	; conditional branch on register order
+  [(set (pc)
+	(if_then_else
+	 (match_operator 1 "order_operator"
+			 [(match_operand:GPR 2 "register_operand" "r")
+			  (match_operand:GPR 3 "reg_or_0_operand" "rJ")])
+	 (label_ref (match_operand 0 "" ""))
+	 (pc)))]
+  ""
+{
+  if (GET_CODE (operands[3]) == CONST_INT)  /* rJ matches only const 0.  */
+    return "b%C1z\t%2,%0";
+  return "b%C1\t%2,%3,%0";
+}
+  [(set_attr "type" "branch")
+   (set_attr "mode" "none")])
+
+;; Used to implement built-in functions: jump to operand 1 when operand 0 holds.
+(define_expand "condjump"
+  [(set (pc)
+	(if_then_else (match_operand 0)
+		      (label_ref (match_operand 1))
+		      (pc)))])
+
+(define_expand "cbranch<mode>4"		; integer compare-and-branch
+  [(set (pc)
+	(if_then_else (match_operator 0 "comparison_operator"
+		       [(match_operand:GPR 1 "register_operand")
+			(match_operand:GPR 2 "nonmemory_operand")])
+		      (label_ref (match_operand 3 ""))
+		      (pc)))]
+  ""
+{
+  riscv_expand_conditional_branch (operands);
+  DONE;
+})
+
+(define_expand "cbranch<mode>4"		; floating-point compare-and-branch
+  [(set (pc)
+	(if_then_else (match_operator 0 "comparison_operator"
+		       [(match_operand:SCALARF 1 "register_operand")
+			(match_operand:SCALARF 2 "register_operand")])
+		      (label_ref (match_operand 3 ""))
+		      (pc)))]
+  ""
+{
+  riscv_expand_conditional_branch (operands);
+  DONE;
+})
+
+(define_insn_and_split "*branch_on_bit<GPR:mode>"
+  [(set (pc)
+	(if_then_else
+	 (match_operator 0 "equality_operator"
+	  [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
+		 (const_int 1)
+		 (match_operand 3 "const_int_operand"))
+	   (const_int 0)])
+	 (label_ref (match_operand 1))
+	 (pc)))
+   (clobber (match_scratch:GPR 4 "=&r"))]
+  ""
+  "#"
+  "reload_completed"
+  [(set (match_dup 4)
+	(ashift:GPR (match_dup 2) (match_dup 3)))
+   (set (pc)
+	(if_then_else
+	 (match_op_dup 0 [(match_dup 4) (const_int 0)])
+	 (label_ref (match_operand 1))
+	 (pc)))]
+{
+  int shift = GET_MODE_BITSIZE (<GPR:MODE>mode) - 1 - INTVAL (operands[3]);
+  operands[3] = GEN_INT (shift);
+
+  if (GET_CODE (operands[0]) == EQ)  /* Tested bit becomes the sign bit.  */
+    operands[0] = gen_rtx_GE (<GPR:MODE>mode, operands[4], const0_rtx);
+  else
+    operands[0] = gen_rtx_LT (<GPR:MODE>mode, operands[4], const0_rtx);
+})
+
+(define_insn_and_split "*branch_on_bit_range<GPR:mode>"
+  [(set (pc)
+	(if_then_else
+	 (match_operator 0 "equality_operator"
+	  [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
+		 (match_operand 3 "const_int_operand")
+		 (const_int 0))
+	   (const_int 0)])
+	 (label_ref (match_operand 1))
+	 (pc)))
+   (clobber (match_scratch:GPR 4 "=&r"))]
+  ""
+  "#"
+  "reload_completed"
+  [(set (match_dup 4)
+	(ashift:GPR (match_dup 2) (match_dup 3)))
+   (set (pc)
+	(if_then_else
+	 (match_op_dup 0 [(match_dup 4) (const_int 0)])
+	 (label_ref (match_operand 1))
+	 (pc)))]
+{
+  operands[3] = GEN_INT (GET_MODE_BITSIZE (<GPR:MODE>mode) - INTVAL (operands[3]));
+})
+
+;;
+;; ....................
+;;
+;; SETTING A REGISTER FROM A COMPARISON
+;;
+;; ....................
+
+;; Destination is always set in SI mode.
+
+(define_expand "cstore<mode>4"		; operand 0 = truth of an order comparison
+  [(set (match_operand:SI 0 "register_operand")
+	(match_operator:SI 1 "order_operator"
+	 [(match_operand:GPR 2 "register_operand")
+	  (match_operand:GPR 3 "nonmemory_operand")]))]
+  ""
+{
+  riscv_expand_scc (operands);
+  DONE;
+})
+
+(define_insn "cstore<mode>4"	; FP compare into an integer register
+  [(set (match_operand:SI 0 "register_operand" "=r")
+	(match_operator:SI 1 "fp_order_operator"
+	      [(match_operand:SCALARF 2 "register_operand" "f")
+	       (match_operand:SCALARF 3 "register_operand" "f")]))]
+  "TARGET_HARD_FLOAT"
+  "f%C1.<fmt>\t%0,%2,%3"
+  [(set_attr "type" "fcmp")
+   (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "*seq_zero_<GPR:mode><GPR2:mode>"	; set if equal to zero
+  [(set (match_operand:GPR2 0 "register_operand" "=r")
+	(eq:GPR2 (match_operand:GPR 1 "register_operand" "r")
+		 (const_int 0)))]
+  ""
+  "seqz\t%0,%1"
+  [(set_attr "type" "slt")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*sne_zero_<GPR:mode><GPR2:mode>"	; set if not equal to zero
+  [(set (match_operand:GPR2 0 "register_operand" "=r")
+	(ne:GPR2 (match_operand:GPR 1 "register_operand" "r")
+		 (const_int 0)))]
+  ""
+  "snez\t%0,%1"
+  [(set_attr "type" "slt")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>"	; signed/unsigned greater-than via slt with swapped operands
+  [(set (match_operand:GPR2 0 "register_operand" "=r")
+	(any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r")
+		     (match_operand:GPR 2 "reg_or_0_operand" "rJ")))]
+  ""
+  "slt<u>\t%0,%z2,%1"
+  [(set_attr "type" "slt")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*sge_